text stringlengths 4 1.02M | meta dict |
|---|---|
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
class Request(models.Model):
    """A peering request exchanged between two parties for a given IXLAN."""
    # Lifecycle states a request can be in.
    STATE_WAITING = "waiting"
    STATE_IN_PROGRESS = "in-progress"
    STATE_HOLD = "hold"
    STATE_REJECTED = "rejected"
    STATE_FINISHED = "finished"
    # (value, human-readable label) choices for the `state` field.
    STATES = (
        (STATE_WAITING, "Waiting"),
        (STATE_IN_PROGRESS, "In Progress"),
        (STATE_HOLD, "On Hold"),
        (STATE_REJECTED, "Rejected"),
        (STATE_FINISHED, "Finished"),
    )
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 --
    # confirm before upgrading. Also the related_name values look swapped
    # (sender -> "requests_received", receiver -> "requests_sent"); verify
    # against reverse-accessor usage elsewhere before changing.
    sender = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name="requests_received")
    receiver = models.ForeignKey("users.Isp", related_name="requests_sent")
    # Identifier of the IXLAN this peering is requested on.
    ixlan_id = models.PositiveIntegerField()
    state = models.CharField(
        choices=STATES, default=STATE_WAITING, max_length=8)
    # Each side flips its readiness flag independently.
    sender_is_ready = models.BooleanField(default=False)
    receiver_is_ready = models.BooleanField(default=False)
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)

    def __str__(self):
        return "Peering request from {} to {}".format(
            self.sender, self.receiver)
| {
"content_hash": "555c8339e754b32d43e4896a59b7be43",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 75,
"avg_line_length": 30.55263157894737,
"alnum_prop": 0.661498708010336,
"repo_name": "dotwaffle/pinder",
"id": "7fb97280595eb749f77d829db1465a6d84914deb",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinder/peering_requests/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3510"
},
{
"name": "HTML",
"bytes": "10926"
},
{
"name": "JavaScript",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "50636"
},
{
"name": "Ruby",
"bytes": "601"
},
{
"name": "Shell",
"bytes": "189"
}
],
"symlink_target": ""
} |
import unittest
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from s3query import s3open
class TestS3Query(unittest.TestCase):
    """Integration tests for s3open: iteration and readline() over plain
    and gzip-compressed objects, both single files and directories."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def _test_iterate(self, s3_loc, output_lines):
        """Assert that iterating s3_loc yields exactly output_lines, in order."""
        ref_lines = iter(output_lines)
        with s3open(s3_loc) as s3file:
            for line in s3file:
                # FIX: iterator.next() was removed in Python 3; the next()
                # builtin works on both Python 2.6+ and Python 3.
                self.assertEqual(line, next(ref_lines))
        # Assert there is no lines in output_lines by check ref_lines throws
        # StopIteration exception
        self.assertRaises(StopIteration, next, ref_lines)

    def _test_readline(self, s3_loc, output_lines):
        """Assert repeated readline() over s3_loc matches output_lines, then
        keeps returning the empty string at EOF."""
        ref_lines = iter(output_lines)
        with s3open(s3_loc) as s3file:
            line = s3file.readline()
            while line != "":
                self.assertEqual(line, next(ref_lines))
                line = s3file.readline()
            # readline() at EOF stays at "".
            self.assertEqual("", s3file.readline())
        # Assert there is no lines in output_lines by check ref_lines throws
        # StopIteration exception
        self.assertRaises(StopIteration, next, ref_lines)

    def test_iterate_plain_files(self):
        """
        Test it iterates the plain files on S3, given a directory
        """
        s3_loc = "s3://s3query/data/test/"
        output_lines = [
            "one\n", "two\n", "three\n", "four\n", "five\n", "six\n",
            "seven\n", "eight\n", "nine\n"]
        self._test_iterate(s3_loc, output_lines)

    def test_readline_plain_files(self):
        """
        Test the readline function on plain files
        """
        s3_loc = "s3://s3query/data/test/"
        output_lines = [
            "one\n", "two\n", "three\n", "four\n", "five\n", "six\n",
            "seven\n", "eight\n", "nine\n"]
        self._test_readline(s3_loc, output_lines)

    def test_read_single_plain_file(self):
        """
        Test reading single plain file
        """
        s3_loc = "s3://s3query/data/test/part_2.txt"
        output_lines = ["four\n", "five\n", "six\n", "seven\n"]
        self._test_iterate(s3_loc, output_lines)
        self._test_readline(s3_loc, output_lines)

    def test_iterate_gz_files(self):
        """Iterate a directory of gzip-compressed files."""
        s3_loc = "s3://s3query/data/testgz/"
        output_lines = [
            "one\n", "two\n", "three\n", "four\n", "five\n", "six\n",
            "seven\n", "eight\n", "nine\n"]
        self._test_iterate(s3_loc, output_lines)

    def test_readline_gz_files(self):
        """readline() over a directory of gzip-compressed files."""
        s3_loc = "s3://s3query/data/testgz/"
        output_lines = [
            "one\n", "two\n", "three\n", "four\n", "five\n", "six\n",
            "seven\n", "eight\n", "nine\n"]
        self._test_readline(s3_loc, output_lines)

    def test_read_single_gz_file(self):
        """Read one gzip-compressed file directly."""
        s3_loc = "s3://s3query/data/testgz/part_2.txt.gz"
        output_lines = ["four\n", "five\n", "six\n", "seven\n"]
        self._test_iterate(s3_loc, output_lines)
        self._test_readline(s3_loc, output_lines)
# Run the whole suite when executed directly (rather than via a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "92d76e41a642a071ab23f658b28333e6",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 76,
"avg_line_length": 32.645161290322584,
"alnum_prop": 0.558300395256917,
"repo_name": "realstraw/s3query",
"id": "1f1b520071d3dd9709dd0ecf8634a16c028fc8c4",
"size": "3036",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_s3query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7400"
}
],
"symlink_target": ""
} |
import spacy

# Module-level singleton: load the small English spaCy pipeline once at
# import time so every importer shares the same (expensive-to-load) model.
nlp = spacy.load('en_core_web_sm')
| {
"content_hash": "33dd64b617e08d221b9d02a911a60a27",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 34,
"avg_line_length": 16.333333333333332,
"alnum_prop": 0.6938775510204082,
"repo_name": "seakers/daphne_brain",
"id": "6ca121b8332469a92f920debe6151cc578f3e2b8",
"size": "49",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "daphne_brain/nlp_object.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "1683352"
},
{
"name": "Python",
"bytes": "1557398"
},
{
"name": "Shell",
"bytes": "4153"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import

from .schema import (
    SchemaOpts,
    ModelSchema,
)

from .convert import (
    ModelConverter,
    fields_for_model,
    property2field,
    column2field,
    field_for,
)

from .exceptions import ModelConversionError

# Package metadata.
__version__ = '0.3.0'
__license__ = 'MIT'

# Names re-exported as the package's public API.
__all__ = [
    'ModelSchema',
    'SchemaOpts',
    'ModelConverter',
    'fields_for_model',
    'property2field',
    'column2field',
    'ModelConversionError',
    'field_for',
]
| {
"content_hash": "ba18067b105a07fa5d9a5bddafdd6c28",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 44,
"avg_line_length": 16.93103448275862,
"alnum_prop": 0.6354378818737271,
"repo_name": "dpwrussell/marshmallow-sqlalchemy",
"id": "15197d03a2071c06c2948d826e6c84d3ac1517de",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "marshmallow_sqlalchemy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "38422"
}
],
"symlink_target": ""
} |
import numpy as np
import os, sys, time
from commons.utils import logger
from commons import utils
from commons import evaluator
from commons import dataloader
# parameter config area
para = {'dataPath': '../data/',
        'dataName': 'dataset#2',
        'dataType': 'tp',  # set the dataType as 'rt' or 'tp'
        'outPath': 'result/',
        'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NPRE'],  # delete where appropriate
        'density': np.arange(0.05, 0.31, 0.05),  # matrix density
        'rounds': 20,  # how many runs are performed at each matrix density
        'dimension': 10,  # dimensionality of the latent factors
        'eta': 0.8,  # learning rate
        'lambda': 0.0002,  # regularization parameter
        'maxIter': 50,  # the max iterations
        'convergeThreshold': 5e-2,  # stopping criteria for convergence
        'beta': 0.3,  # the controlling weight of exponential moving average
        'saveTimeInfo': True,  # whether to keep track of the running time
        'saveLog': True,  # whether to save log into file
        'debugMode': False,  # whether to record the debug info
        'parallelMode': True  # whether to leverage multiprocessing for speedup
        }

startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('AMF: Adaptive Matrix Factorization [TPDS]')

# load the dataset
dataTensor = dataloader.load(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataTensor, para)

# FIX: log message typo corrected ("Elaspsed" -> "Elapsed").
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
| {
"content_hash": "610d5282c88f5dcf64d018519d9384c7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 104,
"avg_line_length": 40.5,
"alnum_prop": 0.6313932980599647,
"repo_name": "wsdream/AMF",
"id": "ca5643c0d894684901b6a39d723d3c8f36f2a2ec",
"size": "1972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarks/run_tp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9003"
},
{
"name": "Python",
"bytes": "27145"
}
],
"symlink_target": ""
} |
from serpent.input_controller import InputController
from serpent.utilities import SerpentError
import itertools
class GameAPI:
    """Base API wrapper for a game: owns the input controller and the
    registered game-input axes, and can combine those axes into a single
    flattened input mapping."""

    # Most recently constructed GameAPI (singleton-style handle).
    instance = None

    def __init__(self, game=None):
        self.game = game

        # Launch lazily (dry run) if the game isn't running yet.
        if not self.game.is_launched:
            self.game.launch(dry_run=True)

        self.input_controller = InputController(game=game, backend=game.input_controller)
        self.game_inputs = {}  # idiom: literal instead of dict()

        self.__class__.instance = self

    def combine_game_inputs(self, combination):
        """ Combine game input axes in a single flattened collection

        Args:
        combination [list] -- A combination of valid game input axis keys
        """
        # Validation
        if not isinstance(combination, list):
            raise SerpentError("'combination' needs to be a list")

        for entry in combination:
            if isinstance(entry, list):
                for entry_item in entry:
                    if entry_item not in self.game_inputs:
                        raise SerpentError("'combination' entries need to be valid members of self.game_input...")
            else:
                if entry not in self.game_inputs:
                    raise SerpentError("'combination' entries need to be valid members of self.game_input...")

        # Concatenate Grouped Axes (if needed)
        game_input_axes = []

        for entry in combination:
            if isinstance(entry, str):
                game_input_axes.append(self.game_inputs[entry])
            elif isinstance(entry, list):
                # Merge the grouped axes into one dict (later entries win on
                # duplicate keys, same as the old {**a, **b} merge).
                concatenated_game_input_axis = {}
                for entry_item in entry:
                    concatenated_game_input_axis.update(self.game_inputs[entry_item])
                game_input_axes.append(concatenated_game_input_axis)

        # Combine Game Inputs
        game_inputs = {}

        # idiom: truthiness instead of `not len(...)`
        if not game_input_axes:
            return game_inputs

        # Cartesian product across axes: one combined entry per key tuple,
        # labelled "key1 - key2 - ..." with the inputs concatenated in order.
        for keys in itertools.product(*game_input_axes):
            compound_label = []
            game_input = []

            for index, key in enumerate(keys):
                compound_label.append(key)
                game_input += game_input_axes[index][key]

            game_inputs[" - ".join(compound_label)] = game_input

        return game_inputs
| {
"content_hash": "b120da782aa1fb13c2e1f648a37faba3",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 115,
"avg_line_length": 31.84931506849315,
"alnum_prop": 0.589247311827957,
"repo_name": "SerpentAI/SerpentAI",
"id": "0e39ae64a14febe753a39776624623a5b6fda0da",
"size": "2325",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "serpent/game_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1359239"
},
{
"name": "Python",
"bytes": "396003"
}
],
"symlink_target": ""
} |
import json
class Mappable(object):
    """Dummy iface for plain object extraction"""
    __slots__ = ()

    def to_map(self):
        """
        :return:
        :rtype: dict of (str, str)
        """
        extracted = {}
        for slot_name in self.__slots__:
            value = getattr(self, slot_name)
            # Nested Mappable values are serialised; plain values pass through.
            extracted[slot_name] = value.to_json() if isinstance(value, Mappable) else value
        return extracted

    def to_json(self):
        """Serialise to a deterministic (sorted-keys) JSON string."""
        return json.dumps(self.to_map(), sort_keys=True)

    @staticmethod
    def from_str(obj):
        """Default parser hook: the base class cannot parse anything."""
        return None
class ListAsMappable(Mappable):
    """Mappable representation for list of instances"""
    __slots__ = ('__inst', '__data')

    def __init__(self, inst):
        # `inst` is a callable used to wrap each element added by populate().
        self.__inst = inst
        self.__data = []

    def __iter__(self):
        return iter(self.__data)

    def __eq__(self, other):
        # Equal only to another ListAsMappable holding equal data.
        return isinstance(other, ListAsMappable) and self.__data == other.__data

    def populate(self, collection):
        """Wrap every element of `collection` and append it in order."""
        self.__data.extend(self.__inst(element) for element in collection)

    def __repr__(self):
        return str(self.__data)

    def to_map(self):
        # A list maps to the list of its elements' maps.
        return [wrapped.to_map() for wrapped in self.__data]
class NoneAsMappable(Mappable):
    """Mappable representation for None(nothing)"""
    __slots__ = ()

    def __eq__(self, other):
        # All NoneAsMappable instances compare equal to one another.
        return isinstance(other, NoneAsMappable)

    def __repr__(self):
        # BUG FIX: __repr__ must return a str -- the original returned None,
        # which made repr() raise TypeError. Mirror __str__/to_json and
        # represent "nothing" as the empty string.
        return ''

    def to_json(self):
        return ''

    def __str__(self):
        return ''

    @staticmethod
    def from_str(obj):
        """Any input parses to the canonical empty instance."""
        return NoneAsMappable()
class StringAsMappable(Mappable):
    """Mappable representation for string"""
    # FIX: the original wrote ('__data') -- a plain string, not a tuple.
    # A lone string happens to declare a single slot, but the trailing comma
    # makes the intended one-element tuple explicit and keeps slot iteration
    # yielding the name rather than its characters.
    __slots__ = ('__data',)

    def __init__(self, string):
        self.__data = string

    def __eq__(self, other):
        if isinstance(other, StringAsMappable):
            return self.__data == other.__data
        else:
            return False

    def __repr__(self):
        return self.__data

    def to_json(self):
        # The raw string is its own serialised form here (no JSON quoting).
        return self.__data

    @staticmethod
    def from_str(obj):
        """Wrap a raw string."""
        return StringAsMappable(obj)
| {
"content_hash": "9bb20300f071458ea3e7823a79dd0c59",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 56,
"avg_line_length": 21.97087378640777,
"alnum_prop": 0.5311533362792753,
"repo_name": "vt-dev/sdk",
"id": "753ca8a9fed6110f2e81c569a3f87790e1a1c2ce",
"size": "2263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/sdk/data/Mappable.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "211981"
},
{
"name": "Makefile",
"bytes": "161"
},
{
"name": "Python",
"bytes": "70613"
}
],
"symlink_target": ""
} |
import json
import logging
import os
from yoconfigurator.base import get_config_module
from yoconfigurator.dicts import DotDict, MissingValue
log = logging.getLogger(__name__)
class LenientJSONEncoder(json.JSONEncoder):
    """JSON encoder that renders MissingValue placeholders instead of failing."""

    def default(self, obj):
        # Substitute a marker string for unresolved config values; defer
        # everything else to the stock encoder (which raises TypeError).
        if not isinstance(obj, MissingValue):
            return super(LenientJSONEncoder, self).default(obj)
        return '### MISSING VALUE ###'
def config_sources(app, environment, cluster, configs_dirs, app_dir,
                   local=False, build=False):
    """Return the config files for an environment & cluster specific app."""
    sources = [
        # Machine-specific
        (configs_dirs, 'hostname'),
        (configs_dirs, 'hostname-local'),
        (configs_dirs, 'hostname-build'),
        # Global
        (configs_dirs, 'common'),
        # Environment + Cluster
        (configs_dirs, 'common-%s' % environment),
        (configs_dirs, 'common-%s-%s' % (environment, cluster)),
        (configs_dirs, 'common-local'),
        (configs_dirs, 'common-build'),
        # Machine-specific overrides
        (configs_dirs, 'common-overrides'),
        # Application-specific
        ([app_dir], '%s-default' % app),
        ([app_dir], '%s-%s' % (app, environment)),
        ([app_dir], '%s-%s-%s' % (app, environment, cluster)),
        (configs_dirs, app),
        (configs_dirs, '%s-%s' % (app, environment)),
        (configs_dirs, '%s-%s-%s' % (app, environment, cluster)),
        ([app_dir], '%s-local' % app),
        ([app_dir], '%s-build' % app),
        (configs_dirs, '%s-local' % app),
        (configs_dirs, '%s-build' % app),
        # Machine-specific application override
        (configs_dirs, '%s-overrides' % app),
    ]

    def _wanted(name):
        # '-build' sources participate only when a build is requested;
        # '-local' sources only when local overrides are requested.
        if not build and name.endswith('-build'):
            return False
        if not local and name.endswith('-local'):
            return False
        return True

    return available_sources([src for src in sources if _wanted(src[1])])
def available_sources(sources):
    """Yield the sources that are present."""
    for dirs, name in sources:
        # Each (dirs, name) pair may resolve in several directories;
        # every existing candidate file is yielded in directory order.
        candidates = (os.path.join(directory, name) + '.py' for directory in dirs)
        for path in candidates:
            if os.path.isfile(path):
                yield path
def smush_config(sources, initial=None):
    """Merge the configuration sources and return the resulting DotDict."""
    config = DotDict(initial if initial is not None else {})
    for source_path in sources:
        log.debug('Merging %s', source_path)
        # Each source module transforms the accumulated config in turn.
        module = get_config_module(source_path)
        config = module.update(config)
        log.debug('Current config:\n%s', json.dumps(config, indent=4,
                                                    cls=LenientJSONEncoder))
    return config
| {
"content_hash": "ac75d79997527ce90d7b137f50330a12",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 76,
"avg_line_length": 34.11904761904762,
"alnum_prop": 0.5792044661549197,
"repo_name": "yola/yoconfigurator",
"id": "58af306ed7af172e8cc8b77a5530e67aec3fff64",
"size": "2866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yoconfigurator/smush.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35669"
}
],
"symlink_target": ""
} |
''' A plot showing the daylight hours in a city's local time (which are
discontinuous due to summer time) using the ``Patch`` and ``Line`` plot
elements to draw custom outlines and fills.
.. bokeh-example-metadata::
:sampledata: daylight
:apis: bokeh.models.glyphs.Line, bokeh.models.glyphs.Patch, bokeh.models.plots.Plot.add_glyph, bokeh.document.document.Document
:refs: :ref:`ug_basic_areas_patches`
:keywords: outline, shading, fill
'''
import datetime as dt
from time import mktime
import numpy as np
from bokeh.core.properties import value
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models import (ColumnDataSource, DatetimeAxis, DatetimeTickFormatter,
FixedTicker, Legend, LegendItem, Line, Patch, Plot, Text)
from bokeh.resources import INLINE
from bokeh.sampledata import daylight
from bokeh.util.browser import view
df = daylight.daylight_warsaw_2013

# Raw per-day sunrise/sunset series.
source = ColumnDataSource(dict(
    dates=df.Date,
    sunrises=df.Sunrise,
    sunsets=df.Sunset,
))

# Closed outline of the whole-year daylight band: sunrise forward,
# sunset reversed.
patch1_source = ColumnDataSource(dict(
    dates=np.concatenate((df.Date, df.Date[::-1])),
    times=np.concatenate((df.Sunrise, df.Sunset[::-1]))
))

# Same band restricted to the summer-time (DST) rows.
summer = df[df.Summer == 1]
patch2_source = ColumnDataSource(dict(
    dates=np.concatenate((summer.Date, summer.Date[::-1])),
    times=np.concatenate((summer.Sunrise, summer.Sunset[::-1]))
))

# Positions where the Summer flag flips on and back off.
summer_start = df.Summer.tolist().index(1)
summer_end = df.Summer.tolist().index(0, summer_start)
calendar_start = df.Date.iloc[0]
summer_start = df.Date.iloc[summer_start]
summer_end = df.Date.iloc[summer_end]
calendar_end = df.Date.iloc[-1]

# Midpoints of the three timezone spans, used to place the labels.
d1 = calendar_start + (summer_start - calendar_start)/2
d2 = summer_start + (summer_end - summer_start)/2
d3 = summer_end + (calendar_end - summer_end)/2

text_source = ColumnDataSource(dict(
    dates=[d1, d2, d3],
    times=[dt.time(11, 30)]*3,
    texts=["CST (UTC+1)", "CEST (UTC+2)", "CST (UTC+1)"],
))

plot = Plot(width=800, height=400)
plot.title.text = "Daylight Hours 2013 - Warsaw, Poland"
plot.toolbar_location = None
plot.x_range.range_padding = 0

# Year-round daylight band (dark fill), with the DST portion overlaid
# in a lighter fill.
patch1 = Patch(x="dates", y="times", fill_color="#282e54")
plot.add_glyph(patch1_source, patch1)

patch2 = Patch(x="dates", y="times", fill_color="#ffdd91")
plot.add_glyph(patch2_source, patch2)

sunrise_line = Line(x="dates", y="sunrises", line_color="orange", line_width=4)
sunrise_line_renderer = plot.add_glyph(source, sunrise_line)

sunset_line = Line(x="dates", y="sunsets", line_color="crimson", line_width=4)
sunset_line_renderer = plot.add_glyph(source, sunset_line)

text = Text(x="dates", y="times", text="texts", text_align="center", text_color="grey")
plot.add_glyph(text_source, text)

# X axis: fixed ticks exactly at the two DST transition dates
# (converted to epoch milliseconds, as Bokeh datetime axes expect).
xformatter = DatetimeTickFormatter(months="%b %d %Y")
min_time = dt.datetime.min.time()
xticker = FixedTicker(ticks=[
    mktime(dt.datetime.combine(summer_start, min_time).timetuple()) * 1000,
    mktime(dt.datetime.combine(summer_end, min_time).timetuple()) * 1000
])
xaxis = DatetimeAxis(formatter=xformatter, ticker=xticker)
plot.add_layout(xaxis, 'below')

yaxis = DatetimeAxis()
yaxis.formatter.hours = '%H:%M'
plot.add_layout(yaxis, 'left')

legend = Legend(items=[
    LegendItem(label=value('sunset'), renderers=[sunset_line_renderer]),
    LegendItem(label=value('sunrise'), renderers=[sunrise_line_renderer]),
])
plot.add_layout(legend)

doc = Document()
doc.add_root(plot)

# Render a standalone HTML file and open it when run as a script.
if __name__ == "__main__":
    doc.validate()
    filename = "daylight.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Daylight Plot"))
    print("Wrote %s" % filename)
    view(filename)
| {
"content_hash": "cd4c635513987d903dddfb58d54c1d80",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 131,
"avg_line_length": 32.7027027027027,
"alnum_prop": 0.6994490358126721,
"repo_name": "bokeh/bokeh",
"id": "385991e38948c79d99b01c357ad926e6542cba67",
"size": "3630",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "examples/models/daylight.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
} |
'''
The base41 encoding scheme encodes pairs of bytes to three ASCII chars.
A single trailing byte is encoded to either a single ASCII char or to two
ASCII chars depending on its byte value.
The chars are all safe to use in filenames, pathnames, URIs, etc
This gives it an advantage over base64 which uses '/' as one of the letters
of its alphabet. In order to get round this limitation in base64, programmers
would have to remember to pass the correct `altchars` to b64encode and to
pass the same `altchars` to b64decode. This has led to many bugs in systems
that use slightly different base64 alphabets for different things.
Base41 solves this by using an alphabet that consists only of letters and
digits: 0123456789jklmnopABCDEFGHIJKLMNOPQRSTUVWX
Each byte of this alphabet obeys the rule: (byte % 48) == value
giving the choice of alphabet an internal logic rather than it just being a
substitution cipher.
Example Usage:
# encode a UUID to base41
>>> import uuid
>>> u=uuid.UUID('a98bd614-b023-45c2-99b1-057a3edeff92')
>>> b=b41encode(u.bytes)
>>> print(b.decode('ascii')) # for the benefit of python2/3 compatibility
IQILHPPQJGIjJpG8R0FG9OUV
>>> d=b41decode(b)
>>> uuid.UUID(bytes=d)
UUID('a98bd614-b023-45c2-99b1-057a3edeff92')
In the above example we encoded a UUID (usually represented using 36 chars)
to a string of 24 base41 chars. We could then use this string as a filename,
directory name or in a URI. If we would have used base64, the encoded string
would have been 'qYvWFLAjRcKZsQV6Pt7/kg==' which not only has annoying padding
chars, but contains an embedded slash.
Copyright: (c) Jazzy Services Limited 2017
License file: ./LICENSE
'''
# _________
# _| helpers |________________________________________________________________

def out_tostring(src):
    """Render an iterable of code points as a text string."""
    return ''.join(map(chr, src))


if bytes is str:
    # Python 2: bytes IS str, so string output doubles as byte output,
    # and incoming "bytes" are chars that must be converted to ordinals.
    out_tobytes = out_tostring

    def in_tobytes(src):
        return (ord(ch) for ch in src)
else:
    # Python 3: bytes() consumes an iterable of ints directly,
    # and incoming bytes already iterate as ints.
    def out_tobytes(src):
        return bytes(src)

    def in_tobytes(src):
        return src
# __________
# _| ENCODING |_______________________________________________________________

# The standard base41 alphabet, as BYTEs rather than CHARs.
# Every byte obeys (byte % 48) == its alphabet index, which is what the
# decoder's filter relies on. bytearray() keeps it bytes on Python 2 too.
ALFA = bytearray(b'0123456789jklmnopABCDEFGHIJKLMNOPQRSTUVWX')

# A BYTE-PAIR is always interpreted as a big-endian UINT16.
# Python 3 has int.from_bytes(); Python 2 composes it by hand.
if hasattr(int, 'from_bytes'):
    def pair_to_uint16(pair):
        """Big-endian: pair[0] is the high byte, pair[1] the low byte."""
        return int.from_bytes(pair, 'big')
else:
    def pair_to_uint16(pair):
        """Big-endian: pair[0] is the high byte, pair[1] the low byte."""
        return (pair[0] << 8) | pair[1]
class B41Encoder(object):
    'Iterator to encode a byte source to the Base41 Alphabet.'

    def __init__(my, src):
        # Source iterator
        my.octets = iter(src)
        # Holds the most recent (possibly partial) byte pair; __iter__
        # inspects it after exhaustion to encode a trailing single byte.
        my.pair = bytearray()

    def u16_iter(my):
        'MANY octet-pair => MANY UINT16'
        while True:
            # FIX (PEP 479): on Python 3.7+ a StopIteration escaping a
            # generator becomes RuntimeError, so exhaustion of the source
            # must be caught and converted into a clean return. my.pair
            # keeps whatever was read (0 or 1 bytes) so __iter__ can still
            # encode a trailing odd byte.
            my.pair = bytearray()
            try:
                my.pair.append(next(my.octets))
                my.pair.append(next(my.octets))
            except StopIteration:
                return
            yield pair_to_uint16(my.pair)

    def __iter__(my):
        'MANY UINT16 => MANY B41-char'
        for u16 in my.u16_iter():
            # -- B41 TRIPLE --
            # low-order B41-Char
            himid, lo = divmod(u16, 41)
            yield ALFA[lo]
            # middle- and high-order B41-Chars
            hi, mid = divmod(himid, 41)
            yield ALFA[mid]
            yield ALFA[hi]
        # encode the trailing byte, if any: one char for values < 41,
        # two chars otherwise
        try:
            hi, lo = divmod(my.pair[0], 41)
            yield ALFA[lo]
            if hi > 0:
                yield ALFA[hi]
        except IndexError:
            pass
def b41encode(src):
    'Encode a byte source using base41 encoding.'
    # B41Encoder is itself iterable; out_tobytes collects it into bytes.
    return out_tobytes(iter(B41Encoder(src)))
def b41string(src):
    'Encode a byte source using base41 encoding.'
    # Same as b41encode but collected into a text string.
    return out_tostring(iter(B41Encoder(src)))
class B41U16Encoder(object):
    'Iterator to encode a source of UINT16s to the Base41 Alphabet.'

    def __init__(my, src):
        # Source iterator of UINT16 values.
        my.u16_iter = iter(src)

    def __iter__(my):
        'MANY UINT16 => MANY B41-char'
        for u16 in my.u16_iter:
            # Emit the three base-41 digits little-end first: low, mid, high.
            rest, low = divmod(u16, 41)
            high, mid = divmod(rest, 41)
            yield ALFA[low]
            yield ALFA[mid]
            yield ALFA[high]
# __________
# _| DECODING |_______________________________________________________________
class UnderflowError(ArithmeticError):
    """Underflow error: a two-char trailing group decoded below 41."""
    # The original defined an __init__ that only forwarded *args to
    # super() -- pure boilerplate, so it is omitted; construction
    # behaviour is identical.
def b41d_filter(src):
    'Yield only B41-BYTEs from a byte source.'
    for byte in src:
        is_printable = 32 < byte < 127
        residue = byte % 48
        # Only printable ASCII whose value mod 48 lands inside the
        # 41-symbol alphabet survives; everything else is dropped.
        if is_printable and residue < 41:
            yield residue
def decode_triplet(triplet):
    'Decode a triplet of base41-encoded bytes.'
    # Digits are little-end first: triplet[0] is the least significant.
    u16 = triplet[0] + 41 * triplet[1] + 41 * 41 * triplet[2]
    if u16 >= 65536:
        msg = 'decoding three bytes gave a value ({0}) greater than 65535'
        raise OverflowError(msg.format(u16))
    return u16
def decode_double(double):
    'Decode a pair of trailing bytes from base41.'
    u8 = double[0] + 41 * double[1]
    # A two-char trailing group must land in the byte range that a single
    # char cannot reach: 41..255 inclusive.
    if u8 < 41:
        underflow = 'decoding two bytes gave a value ({0}) less than 41'.format(u8)
        raise UnderflowError(underflow)
    if u8 >= 256:
        overflow = 'decoding two bytes gave a value ({0}) greater than 255'.format(u8)
        raise OverflowError(overflow)
    return u8
def decode_single(single):
    'Decode a single trailing byte from base41.'
    value = single[0]
    # A lone trailing char can only encode the range 0..40.
    assert value < 41, 'Overflow error: u8={}'.format(value)
    return value
class B41Decoder(object):
    'Iterator to decode a base41-encoded byte source.'

    def __init__(my, src):
        my.src = b41d_filter(src)
        # Holds the most recent (possibly partial) triplet; __iter__
        # inspects it after exhaustion to decode 1 or 2 trailing chars.
        my.acc = bytearray()

    def u16_iter(my):
        # MANY b41-triplet => MANY UINT16
        while True:
            # FIX (PEP 479): on Python 3.7+ a StopIteration escaping a
            # generator becomes RuntimeError, so exhaustion of the source
            # must be caught and converted into a clean return. my.acc
            # retains the partial triplet (0-2 bytes) for the trailing
            # logic in __iter__.
            my.acc = bytearray()
            try:
                my.acc.append(next(my.src))
                my.acc.append(next(my.src))
                my.acc.append(next(my.src))
            except StopIteration:
                return
            yield decode_triplet(my.acc)

    def __iter__(my):
        for u16 in my.u16_iter():
            # Big-endian: high byte first.
            yield u16 >> 8
            yield u16 & 0xff
        # Decode any trailing chars to a final single byte.
        if len(my.acc) == 2:
            yield decode_double(my.acc)
        elif len(my.acc) == 1:
            yield decode_single(my.acc)
def b41decode(src):
    'Decode a base41-encoded byte source.'
    decoder = B41Decoder(in_tobytes(src))
    return out_tobytes(iter(decoder))
class B41U16Decoder(object):
    'Iterator to decode a base41-encoded byte source to UINT16s.'

    def __init__(my, src):
        my.src = b41d_filter(src)

    def __iter__(my):
        # MANY b41-triplet => MANY UINT16
        acc = bytearray()
        while True:
            try:
                acc.append(next(my.src))
                acc.append(next(my.src))
                acc.append(next(my.src))
            except StopIteration:
                # FIX (PEP 479): pre-3.7, the StopIteration silently ended
                # the generator (leaving the assert below unreachable); on
                # 3.7+ it would become RuntimeError. Catching it fixes the
                # crash and makes the trailing-bytes check reachable, as
                # the comment on it always intended.
                break
            u16 = decode_triplet(acc)
            acc = bytearray()
            yield u16
        # Trailing bytes not allowed
        assert len(acc) == 0, 'Trailing bytes while decoding to UINT16'
| {
"content_hash": "fd1c859d18170b26fc4e1502591085ba",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 78,
"avg_line_length": 33.26431718061674,
"alnum_prop": 0.6013773010197325,
"repo_name": "JazzyServices/jazzy",
"id": "4229c1246eb04c2fdf3c0dd506cbf6bb4bba83d1",
"size": "7612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base41.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "81831"
}
],
"symlink_target": ""
} |
from django.shortcuts import render
from pupa.models import RunPlan
from opencivicdata.core.models import (Jurisdiction, Person, Organization,
Membership, PersonName, Post)
from opencivicdata.legislative.models import (Bill, VoteEvent, BillSponsorship,
LegislativeSession, PersonVote)
from admintools.issues import IssueType
from admintools.models import DataQualityIssue, IssueResolverPatch
from django.db.models import Count
from django.contrib.contenttypes.models import ContentType
from collections import defaultdict, Counter
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.db import transaction
import datetime
# Basic Classes with their Identifiers.
# Maps the lowercase type slug (used as the prefix of
# DataQualityIssue.issue values, e.g. "person-...") to its model class.
upstream = {'person': Person,
            'organization': Organization,
            'membership': Membership,
            'bill': Bill,
            'voteevent': VoteEvent,
            'post': Post}
# get run status for a jurisdiction
def _get_run_status(jur):
    """Latest scrape end date plus the length of the current failure streak.

    `count` is None for a single failure (no streak worth flagging) and the
    streak length otherwise (0 when the newest run succeeded).
    """
    runs = RunPlan.objects.filter(jurisdiction=jur).order_by('-end_time')
    latest_date = runs.first().end_time.date()
    failures = 0
    for run in runs:
        if run.success:
            break
        failures += 1
    return {'count': None if failures == 1 else failures, 'date': latest_date}
# validate date in YYYY-MM-DD format.
def validate_date(date_text):
    """True iff date_text parses as a valid YYYY-MM-DD calendar date."""
    try:
        datetime.datetime.strptime(date_text, '%Y-%m-%d')
    except ValueError:
        return False
    return True
# get pagination of results upto 20 objects
def _get_pagination(objects_list, request):
    """Paginate objects_list (20 per page) using the request's `page` param.

    Returns (page_of_objects, page_range) where page_range is the window
    of page numbers (up to four either side of the current page) to show
    at the bottom of the table.
    """
    paginator = Paginator(objects_list, 20)
    try:
        page = int(request.GET.get('page', '1'))
    except (TypeError, ValueError):
        # FIX: was a bare `except:` -- catch only the conversion errors
        # int() actually raises so real failures aren't silently swallowed.
        page = 1
    try:
        objects = paginator.page(page)
    except (EmptyPage, InvalidPage):
        objects = paginator.page(1)
    # page_range to show at bottom of table
    index = objects.number - 1
    max_index = len(paginator.page_range)
    start_index = index - 4 if index >= 4 else 0
    end_index = index + 4 if index <= max_index - 4 else max_index
    page_range = paginator.page_range[start_index:end_index]
    return objects, page_range
# Status Page
def overview(request):
    """Dashboard: per-jurisdiction issue counts by class/alert plus run status."""
    rows = {}
    # Aggregate issue counts per (jurisdiction, issue, alert-level).
    all_counts = DataQualityIssue.objects.values('jurisdiction', 'issue',
                                                 'alert').annotate(
                                                     Count('issue'))
    for counts in all_counts:
        jur = Jurisdiction.objects.get(id=counts['jurisdiction'])
        rows.setdefault(counts['jurisdiction'], {})['jur_name'] = jur.name
        # Bucket by issue class (the text before the first '-') and alert
        # level, summing counts across the individual issue types.
        rows[counts['jurisdiction']].setdefault(
            counts['issue'].split('-')[0], {})
        rows[counts['jurisdiction']][counts['issue'].split('-')[0]][
            counts['alert']] = rows[counts['jurisdiction']][
                counts['issue'].split('-')[0]].get(
                    counts['alert'], 0) + counts['issue__count']
        if not rows[counts['jurisdiction']].get('run'):
            rows[counts['jurisdiction']]['run'] = _get_run_status(jur)
    # RunPlan For those who don't have any type of dataquality_issues
    rest_jurs = Jurisdiction.objects.exclude(id__in=rows.keys())
    for jur in rest_jurs:
        rows[jur.id] = {}
        rows[jur.id]['jur_name'] = jur.name
        rows[jur.id]['run'] = _get_run_status(jur)
    # Sort alphabetically by jurisdiction name for display.
    rows = sorted(rows.items(), key=lambda v: v[1]['jur_name'])
    return render(request, 'admintools/index.html', {'rows': rows})
# Calculates all dataquality_issues in given jurisdiction
def _jur_dataquality_issues(jur_id):
    """Return {related_class: {issue: {alert, description, count}}} for one
    jurisdiction, covering every registered issue type (count 0 if absent)."""
    cards = defaultdict(dict)
    issues = IssueType.choices()
    for issue, description in issues:
        related_class = IssueType.class_for(issue)
        cards[related_class][issue] = {}
        # Reuse related_class rather than calling class_for() a second time.
        issue_type = related_class + '-' + issue
        alert = IssueType.level_for(issue)
        # FIX: `True if alert == 'error' else False` is just the comparison.
        cards[related_class][issue]['alert'] = alert == 'error'
        cards[related_class][issue]['description'] = description
        ct_obj = ContentType.objects.get_for_model(upstream[related_class])
        j = Jurisdiction.objects.filter(
            id=jur_id, dataquality_issues__content_type=ct_obj,
            dataquality_issues__issue=issue_type).annotate(_issues=Count(
                'dataquality_issues'))
        cards[related_class][issue]['count'] = j[0]._issues if j else 0
    return dict(cards)
# Jurisdiction Specific Page
def jurisdiction_intro(request, jur_id):
    """Landing page for one jurisdiction: issue cards and org filter lists."""
    issues = _jur_dataquality_issues(jur_id)

    bill_from_orgs_list = (Bill.objects
                           .filter(legislative_session__jurisdiction__id=jur_id)
                           .values('from_organization__name')
                           .distinct())
    voteevent_orgs_list = (VoteEvent.objects
                           .filter(legislative_session__jurisdiction__id=jur_id)
                           .values('organization__name')
                           .distinct())
    orgs_list = (Organization.objects
                 .filter(jurisdiction__id=jur_id)
                 .values('classification')
                 .distinct())

    context = dict(
        jur_id=jur_id,
        cards=issues,
        bill_orgs=bill_from_orgs_list,
        voteevent_orgs=voteevent_orgs_list,
        orgs=orgs_list,
    )
    return render(request, 'admintools/jurisdiction_intro.html', context)
# Bills and VoteEvents related info for a session
def legislative_session_info(request, jur_id, identifier):
    """Detail page for one legislative session: bill and vote-event orgs."""
    session = LegislativeSession.objects.get(
        jurisdiction__id=jur_id, identifier=identifier)

    bill_from_orgs_list = (Bill.objects
                           .filter(legislative_session__jurisdiction__id=jur_id,
                                   legislative_session__identifier=identifier)
                           .values('from_organization__name')
                           .distinct())
    voteevent_orgs_list = (VoteEvent.objects
                           .filter(legislative_session__jurisdiction__id=jur_id,
                                   legislative_session__identifier=identifier)
                           .values('organization__name')
                           .distinct())

    context = dict(
        jur_id=jur_id,
        session=session,
        bill_orgs=bill_from_orgs_list,
        voteevent_orgs=voteevent_orgs_list,
    )
    return render(request, 'admintools/legislative_session_info.html', context)
# Build a Q filter from the request's GET parameters.
def _filter_results(request):
    """Translate recognised GET parameters into one combined Q object.

    Every supplied parameter becomes a case-insensitive "contains" lookup
    and all of them are ANDed together; unknown parameters are ignored.
    """
    # GET parameter name -> ORM lookup path, in the same order as the
    # original hand-written if-chain.
    lookups = (
        ('person', 'name__icontains'),
        ('organization', 'name__icontains'),
        ('org_classification', 'classification__icontains'),
        ('bill_identifier', 'identifier__icontains'),
        ('bill_session', 'legislative_session__name__icontains'),
        ('voteevent_bill', 'bill__identifier__icontains'),
        ('voteevent_org', 'organization__name__icontains'),
        ('membership', 'person_name__icontains'),
        ('membership_org', 'organization__name__icontains'),
        ('post_role', 'role__icontains'),
        ('post_label', 'label__icontains'),
    )
    query = Q()
    for param, lookup in lookups:
        value = request.GET.get(param)
        if value:
            query &= Q(**{lookup: value})
    return query
# Lists the objects flagged with a given issue.
def list_issue_objects(request, jur_id, related_class, issue_slug):
    """Show a paginated listing of all objects flagged with one issue,
    optionally narrowed by the GET filters from _filter_results()."""
    description = IssueType.description_for(issue_slug)
    full_issue_name = IssueType.class_for(issue_slug) + '-' + issue_slug
    flagged_ids = DataQualityIssue.objects.filter(
        jurisdiction_id=jur_id,
        issue=full_issue_name).values_list('object_id', flat=True)
    cards = upstream[related_class].objects.filter(id__in=flagged_ids)
    if request.GET:
        cards = cards.filter(_filter_results(request))
    objects, page_range = _get_pagination(cards.order_by('id'), request)
    # url_slug names the matching Django-admin change page.
    if related_class in ('person', 'organization', 'post'):
        url_slug = 'core_{}_change'.format(related_class)
    elif related_class in ('bill', 'voteevent'):
        url_slug = 'legislative_{}_change'.format(related_class)
    else:
        # Membership objects have no Django-admin page of their own, so
        # the template links to the related organization instead.
        url_slug = None
    context = {'jur_id': jur_id,
               'issue_slug': issue_slug,
               'objects': objects,
               'description': description,
               'page_range': page_range,
               'url_slug': url_slug,
               'related_class': related_class}
    return render(request, 'admintools/list_issues.html', context)
# prepare data to be loaded into main DB.
def _prepare_import(issue_slug, posted_data):
if issue_slug == 'missing-photo':
issue_items = dict((k, v) for k, v in posted_data.items()
if v and not k.startswith('csrf'))
elif issue_slug in ['missing-phone', 'missing-email', 'missing-address']:
issue_items = defaultdict(dict)
count = 1
for k, v in posted_data.items():
if v and not k.startswith('csrf') and not k.startswith('note'):
c = k.split("ocd-person/")
# using custom hash because two legislators can have same Phone
# numbers for eg, `State House Message Phone`
hash_ = str(count) + '__@#$__' + v
issue_items[hash_]['id'] = "ocd-person/" + c[1]
issue_items[hash_]['code'] = c[0]
count += 1
for hash_, item in issue_items.items():
issue_items[hash_]['note'] = posted_data['note_' + item['code']
+ item['id']]
else:
raise ValueError("Person Issue Resolver needs update for new issue.")
return issue_items
# Creates `unreviewed` patches (applied_by 'admin') for `missing` values.
@transaction.atomic
def person_resolve_issues(request, jur_id, issue_slug):
    """Accept the person-issue resolver form and create resolver patches.

    Each submitted value becomes an IssueResolverPatch in 'unreviewed'
    status; the whole batch runs in one transaction.  Always redirects
    back to the issue-object listing.

    Raises ValueError when called with an issue slug this resolver does
    not know how to map to a patch category.
    """
    if request.method == 'POST':
        if issue_slug == 'missing-phone':
            category = 'voice'
        elif issue_slug == 'missing-photo':
            category = 'image'
        elif issue_slug in ['missing-email', 'missing-address']:
            # 'missing-email' -> 'email', 'missing-address' -> 'address'
            category = issue_slug[8:]
        else:
            raise ValueError("Person Resolver needs update for new issue.")
        jur = Jurisdiction.objects.get(id=jur_id)
        issue_items = _prepare_import(issue_slug, request.POST)
        for hash_, items in issue_items.items():
            if issue_slug != 'missing-photo':
                # hash_ is "<counter>__@#$__<value>"; recover the value.
                new_value = hash_.split('__@#$__')[1]
                p = Person.objects.get(id=items.get('id'))
                note = items.get('note')
            else:
                # For photos, hash_ is the person's ocd id and `items`
                # is the new image URL.
                new_value = items
                p = Person.objects.get(id=hash_)
                note = ''
            # objects.create() already persists the row; the original's
            # follow-up patch.save() was a redundant second write.
            IssueResolverPatch.objects.create(
                content_object=p,
                jurisdiction=jur,
                status='unreviewed',
                new_value=new_value,
                note=note,
                category=category,
                alert='warning',
                applied_by='admin',
            )
        if issue_items:
            messages.success(request, 'Successfully created {} {}(s) Admin '
                             'Patch(es)'.format(len(issue_items),
                                                IssueType.description_for(
                                                    issue_slug)))
    return HttpResponseRedirect(reverse('list_issue_objects',
                                        args=(jur_id, 'person',
                                              issue_slug)))
# lists `unreviewed` patches for review.
def review_person_patches(request, jur_id):
    """List unreviewed patches for a jurisdiction and apply status updates.

    POST: each form field is named 'category__patch_id__object_id' with the
    chosen status as its value; the named patch gets that status.  When an
    'image' or 'name' patch is approved, any previously approved patch for
    the same object/category is demoted to 'deprecated' first, so at most
    one approved patch exists per object for those categories.

    GET parameters ('person', 'category', 'alert', 'applied_by') filter the
    listing; the selected filter values are echoed back to the template.
    """
    if request.method == 'POST':
        for k, v in request.POST.items():
            if not k.startswith('csrf'):
                # k = 'category__patch_id__object_id'
                # v = `status`
                c = k.split("__")
                if (c[0] == 'image' or c[0] == 'name') and v == 'approved':
                    try:
                        # mark already approved patch as deprecated (if any!)
                        approved_patch = IssueResolverPatch.objects.get(
                            object_id=c[2], category=c[0], status='approved')
                        approved_patch.status = 'deprecated'
                        approved_patch.save()
                    except IssueResolverPatch.DoesNotExist:
                        pass
                patch = IssueResolverPatch.objects.get(id=c[1])
                patch.status = v
                patch.save()
        # len(POST) - 1 discounts the CSRF token field.
        if len(request.POST)-1:
            messages.success(request, 'Successfully updated status of {} '
                             'Patch(es)'.format(len(request.POST)-1))
    patches = IssueResolverPatch.objects \
        .filter(status='unreviewed', jurisdiction_id=jur_id)
    # To maintain applied filter in template
    category_search = False
    alert_search = False
    applied_by_search = False
    # Filters Results
    if request.GET.get('person'):
        person_ids = Person.objects.filter(
            memberships__organization__jurisdiction_id=jur_id,
            name__icontains=request.GET.get('person'))
        patches = patches.filter(object_id__in=person_ids)
    if request.GET.get('category'):
        patches = patches.filter(category=request.GET.get('category'))
        category_search = request.GET.get('category')
    if request.GET.get('alert'):
        patches = patches.filter(alert=request.GET.get('alert'))
        alert_search = request.GET.get('alert')
    if request.GET.get('applied_by'):
        patches = patches.filter(applied_by=request.GET.get('applied_by'))
        applied_by_search = request.GET.get('applied_by')
    objects, page_range = _get_pagination(patches.order_by('id'), request)
    # Choice lists for the filter dropdowns in the template.
    categories_ = sorted(dict(IssueResolverPatch._meta.get_field(
        'category').choices).items())
    alerts_ = sorted(dict(IssueResolverPatch._meta.get_field(
        'alert').choices).items())
    context = {'jur_id': jur_id,
               'patches': objects,
               'page_range': page_range,
               'alert_search': alert_search,
               'category_search': category_search,
               'applied_by_search': applied_by_search,
               'categories_': categories_,
               'alerts_': alerts_}
    return render(request, 'admintools/review_person_patches.html', context)
# list all patches and functionality to modify `status`.
def list_all_person_patches(request, jur_id):
    """List every patch for a jurisdiction and allow status changes.

    POST: maps patch id -> new status.  Approving an 'image' or 'name'
    patch is rejected (with an error message) when another approved patch
    already exists for the same object/category, since those categories
    allow only one approved value at a time.

    GET parameters ('person', 'category', 'alert', 'applied_by', 'status')
    filter the listing; the selected values are echoed back to the template.
    """
    if request.method == 'POST':
        count = 0
        for k, v in request.POST.items():
            if not k.startswith('csrf'):
                pa = IssueResolverPatch.objects.get(id=k)
                # if category is `name` or `image` and updated status is
                # approved then make sure to display error if there is
                # already an approved patch present.
                if pa.category in ['image', 'name'] and v == 'approved' \
                        and pa.status != v:
                    hg = IssueResolverPatch.objects \
                        .filter(jurisdiction_id=jur_id,
                                object_id=pa.object_id,
                                category=pa.category, status='approved')
                    if hg:
                        per = Person.objects.get(id=pa.object_id)
                        # BUG FIX: message previously read "Pathces".
                        messages.error(request,
                                       "Multiple Approved Patches for"
                                       " {} ({})".format(per.name,
                                                         pa.category))
                        continue
                # if `status` is changed.
                if pa.status != v:
                    pa.status = v
                    pa.save()
                    count += 1
        if count:
            messages.success(request, "Successfully Updated Status of "
                             "{} Patch(es)".format(count))
    patches = IssueResolverPatch.objects \
        .filter(jurisdiction_id=jur_id)
    # To maintain applied filter in template
    category_search = False
    alert_search = False
    applied_by_search = False
    status_search = False
    # Filter Results
    if request.GET.get('person'):
        person_ids = Person.objects.filter(
            memberships__organization__jurisdiction_id=jur_id,
            name__icontains=request.GET.get('person'))
        patches = patches.filter(object_id__in=person_ids)
    if request.GET.get('category'):
        patches = patches.filter(category=request.GET.get('category'))
        category_search = request.GET.get('category')
    if request.GET.get('alert'):
        patches = patches.filter(alert=request.GET.get('alert'))
        alert_search = request.GET.get('alert')
    if request.GET.get('applied_by'):
        patches = patches.filter(applied_by=request.GET.get('applied_by'))
        applied_by_search = request.GET.get('applied_by')
    if request.GET.get('status'):
        patches = patches.filter(status=request.GET.get('status'))
        status_search = request.GET.get('status')
    objects, page_range = _get_pagination(patches.order_by('id'), request)
    # Choice lists for the filter dropdowns in the template.
    categories_ = sorted(dict(IssueResolverPatch._meta.get_field(
        'category').choices).items())
    alerts_ = sorted(dict(IssueResolverPatch._meta.get_field(
        'alert').choices).items())
    status_ = sorted(dict(IssueResolverPatch._meta.get_field(
        'status').choices).items())
    context = {'jur_id': jur_id,
               'patches': objects,
               'page_range': page_range,
               'alert_search': alert_search,
               'category_search': category_search,
               'applied_by_search': applied_by_search,
               'status_search': status_search,
               'categories_': categories_,
               'status_': status_,
               'alerts_': alerts_}
    return render(request, 'admintools/list_person_patches.html', context)
# To retire legislator(s).
def retire_legislators(request, jur_id):
    """Render the retirement form and apply submitted retirement dates.

    POST maps person id -> retirement date; a date must parse as
    YYYY-MM-DD and must not precede any existing membership end_date,
    otherwise an error message is shown for that person.  On success every
    still-open membership (end_date == '') is closed with the given date.
    GET parameter 'person' filters the list of retirable people by name.
    """
    if request.method == 'POST':
        retired_count = 0
        for person_id, raw_date in request.POST.items():
            if not raw_date or person_id.startswith('csrf'):
                continue
            end_date = raw_date.replace('/', '-')
            person = Person.objects.get(id=person_id)
            # validate_date() checks for YYYY-MM-DD; a valid date must
            # also not be earlier than any existing membership end_date.
            if validate_date(end_date) and not person.memberships.filter(
                    end_date__gt=end_date).count():
                # Close out every still-open membership.
                person.memberships.filter(end_date='') \
                    .update(end_date=end_date)
                retired_count += 1
            else:
                messages.error(request, 'Provide a valid Retirement '
                               'Date for {}'.format(person.name))
        if retired_count:
            messages.success(request, 'Successfully Retired {} '
                             'legislator(s)'.format(retired_count))
    # Only people with at least one open membership can still be retired.
    people = Person.objects.filter(
        memberships__organization__jurisdiction_id=jur_id) \
        .filter(memberships__end_date='')
    name_query = request.GET.get('person')
    if name_query:
        people = people.filter(name__icontains=name_query)
    objects, page_range = _get_pagination(
        people.distinct().order_by('name'), request)
    context = {'jur_id': jur_id,
               'people': objects,
               'page_range': page_range}
    return render(request, 'admintools/retire_legislators.html', context)
# List all the retire legislator(s) and functionality to update/unretire them.
def list_retired_legislators(request, jur_id):
    """List retired legislators; update or clear their retirement dates.

    POST: maps person id -> new retirement date (empty string un-retires).
    A new date must parse as YYYY-MM-DD and must not precede any other
    membership end_date the person has; otherwise an error message is
    shown and that person is skipped.

    GET parameter 'person' filters the listing by name.  The template
    receives (person, latest end_date) pairs.
    """
    if request.method == 'POST':
        count = 0
        for k, v in request.POST.items():
            if not k.startswith('csrf'):
                p = Person.objects.get(id=k)
                # Current retirement date == the person's latest end_date.
                prev_retirement_date = p.memberships.order_by(
                    '-end_date').first().end_date
                v = v.replace('/', '-')
                # validate date in YYYY-MM-DD format
                resp = validate_date(v)
                if resp or v == '':
                    if v:
                        if resp:
                            # To make sure that provided retirement date is not
                            # less than the end_date, other than current
                            # retirement date
                            prev_date = p.memberships.filter(
                                end_date__lt=prev_retirement_date).order_by(
                                '-end_date').first()
                            if prev_date:
                                if prev_date.end_date > v:
                                    messages.error(request, 'Provide a valid '
                                                   'Retirement Date for '
                                                   '{}'.format(p.name))
                                    continue
                        else:
                            # NOTE(review): unreachable — the enclosing
                            # `if resp or v == '':` guarantees resp is
                            # truthy whenever v is non-empty.
                            messages.error(request,
                                           'Provide a valid Retirement Date'
                                           ' for {}'.format(p.name))
                    # Empty v falls through here too, un-retiring the person.
                    if prev_retirement_date != v:
                        p.memberships.filter(end_date=prev_retirement_date) \
                            .update(end_date=v)
                        count += 1
                else:
                    messages.error(request,
                                   'Provide a valid Retirement Date'
                                   ' for {}'.format(p.name))
        if count:
            messages.success(request, 'Successfully Updated {} '
                             'Retired legislator(s)'.format(count))
    if request.GET.get('person'):
        people = Person.objects.filter(
            memberships__organization__jurisdiction_id=jur_id) \
            .filter(~Q(memberships__end_date=''),
                    Q(name__icontains=request.GET.get('person'))).distinct()
    else:
        people = Person.objects.filter(
            memberships__organization__jurisdiction_id=jur_id) \
            .filter(~Q(memberships__end_date='')).distinct()
    # Pair each person with their latest membership end_date for display.
    people_with_end_date = {}
    for person in people:
        people_with_end_date[person] = person.memberships.order_by(
            '-end_date').first().end_date
    objects, page_range = _get_pagination(tuple(people_with_end_date.items()),
                                          request)
    context = {'jur_id': jur_id,
               'people': objects,
               'page_range': page_range}
    return render(request, 'admintools/list_retired_legislators.html', context)
# Name Resolution Tool
def name_resolution_tool(request, jur_id, category):
    """Resolve unmatched person names for one jurisdiction.

    `category` selects the source of unmatched names:
    'unmatched_bill_sponsors', 'unmatched_voteevent_voters' or
    'unmatched_memberships' (anything else raises ValueError).

    POST: maps an unmatched name -> chosen person id; creates a PersonName
    alias, locks the person's `other_names` field so imports keep the
    alias, and back-fills the person id on the matching sponsorships,
    votes or memberships.

    GET: lists unmatched names with occurrence counts, filtered by
    `session_id` (defaults to the latest session; 'all' disables the
    session filter; memberships are never session-filtered).
    """
    if request.method == 'POST':
        count = 0
        for name, pid in request.POST.items():
            if pid and not name.startswith('csrf'):
                PersonName.objects.create(person_id=pid, name=name,
                                          note='added via name resolution tool'
                                          )
                person = Person.objects.get(pk=pid)
                # Lock `other_names` so later imports don't drop the alias.
                if 'other_names' not in person.locked_fields:
                    person.locked_fields.append('other_names')
                    person.save()
                if category == 'unmatched_bill_sponsors':
                    sp = BillSponsorship.objects.filter(entity_type='person',
                                                        person_id=None,
                                                        name=name)
                    sp.update(person_id=pid)
                elif category == 'unmatched_voteevent_voters':
                    vs = PersonVote.objects.filter(voter_id=None,
                                                   voter_name=name)
                    vs.update(voter_id=pid)
                elif category == 'unmatched_memberships':
                    mem = Membership.objects.filter(person_id=None,
                                                    person_name=name)
                    mem.update(person_id=pid)
                else:
                    raise ValueError('Name Resolution Tool needs update'
                                     ' for new category')
                count += 1
        # BUG FIX: the success message previously said "Umatched".
        messages.success(request, 'Successfully Updated {} '
                         'Unmatched legislator(s)'.format(count))
    unresolved = Counter()
    session_search = False
    session_id = request.GET.get('session_id')
    # Default to the latest legislative session when none was requested.
    # (Simplified from `not session_id and not session_id == 'all'`,
    # which is equivalent to just `not session_id`.)
    if category != 'unmatched_memberships' and not session_id:
        session_id = LegislativeSession.objects.filter(
            jurisdiction_id=jur_id).order_by('-identifier') \
            .first().identifier
    if category == 'unmatched_bill_sponsors':
        queryset = BillSponsorship.objects \
            .filter(
                bill__legislative_session__jurisdiction_id=jur_id,
                entity_type='person',
                person_id=None).annotate(num=Count('name'))
        if session_id != 'all':
            queryset = queryset.filter(
                bill__legislative_session__identifier=session_id)
            session_search = session_id
        # Calculates how many times an unmatched name appeared.
        for obj in queryset:
            unresolved[obj.name] += obj.num
    elif category == 'unmatched_voteevent_voters':
        queryset = PersonVote.objects \
            .filter(
                vote_event__legislative_session__jurisdiction_id=jur_id,
                voter_id=None).annotate(num=Count('voter_name'))
        if session_id != 'all':
            queryset = queryset.filter(
                vote_event__legislative_session__identifier=session_id)
            session_search = session_id
        # Calculates how many times an unmatched name appeared.
        for obj in queryset:
            unresolved[obj.voter_name] += obj.num
    elif category == 'unmatched_memberships':
        queryset = Membership.objects.filter(
            organization__jurisdiction_id=jur_id,
            person_id=None
        ).annotate(num=Count('person_name'))
        # Calculates how many times an unmatched name appeared.
        for obj in queryset:
            unresolved[obj.person_name] += obj.num
    else:
        raise ValueError('Name Resolution Tool needs update'
                         ' for new category')
    # (name, count) pairs sorted by count, most frequent first —
    # Counter.most_common() is exactly the original sorted(..., key=count,
    # reverse=True), with identical tie ordering.
    unresolved = unresolved.most_common()
    objects, page_range = _get_pagination(unresolved, request)
    people = Person.objects.filter(
        memberships__organization__jurisdiction_id=jur_id) \
        .order_by('name').distinct()
    context = {
        'jur_id': jur_id,
        'people': people,
        'unresolved': objects,
        'page_range': page_range,
        'category': category,
        'session_search': session_search,
    }
    return render(request, 'admintools/unresolved.html', context)
# Record an `unreviewed` patch correcting a wrong value on a person.
def create_person_patch(request, jur_id):
    """Render the patch-creation form; on POST, create one admin patch.

    Missing required POST fields ('person', 'old_value', 'new_value',
    'category') raise on access/creation, aborting the patch.
    """
    if request.method == 'POST':
        target = Person.objects.get(id=request.POST['person'])
        # if any error occurs here, `create` raises and nothing is saved.
        IssueResolverPatch.objects.create(
            content_object=target,
            jurisdiction_id=jur_id,
            status='unreviewed',
            old_value=request.POST['old_value'],
            new_value=request.POST['new_value'],
            source=request.POST.get('source'),
            category=request.POST['category'],
            alert='error',
            note=request.POST.get('note'),
            applied_by='admin',
        )
        messages.success(request, "Successfully created Patch")
    jurisdiction_people = Person.objects.filter(
        memberships__organization__jurisdiction_id=jur_id).distinct()
    return render(request, 'admintools/create_person_patch.html',
                  {'jur_id': jur_id,
                   'people': jurisdiction_people})
| {
"content_hash": "7471e02963fdf92f741ec78aa77d3866",
"timestamp": "",
"source": "github",
"line_count": 680,
"max_line_length": 79,
"avg_line_length": 42.88529411764706,
"alnum_prop": 0.5582264590905973,
"repo_name": "hiteshgarg14/admintools",
"id": "88b5dc30aa41dcd7e36e00624c1eab88152896b8",
"size": "29162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "admintools/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "58679"
},
{
"name": "Python",
"bytes": "178582"
}
],
"symlink_target": ""
} |
"""
WebDAV expand-property report
"""
__all__ = ["report_DAV__expand_property"]
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.python.failure import Failure
from twext.python.log import Logger
from txweb2 import responsecode
from txdav.xml import element
from txdav.xml.element import dav_namespace
from txweb2.dav.http import statusForFailure, MultiStatusResponse
from txweb2.dav.method import prop_common
from txweb2.dav.method.propfind import propertyName
from txweb2.dav.resource import AccessDeniedError
from txweb2.dav.util import parentForURL
from txweb2.http import HTTPError, StatusResponse
log = Logger()
@inlineCallbacks
def report_DAV__expand_property(self, request, expand_property):
    """
    Generate an expand-property REPORT. (RFC 3253, section 3.8)

    Reads each requested top-level property, then dereferences any DAV:href
    values it contains: every href is replaced by the requested
    sub-properties of the referenced resource (subject to DAV:read
    access on both the resource and its parent).  The result is returned
    as a multi-status response.

    TODO: for simplicity we will only support one level of expansion.
    """
    # Verify root element
    if not isinstance(expand_property, element.ExpandProperty):
        raise ValueError("%s expected as root element, not %s."
                         % (element.ExpandProperty.sname(), expand_property.sname()))

    # Only handle Depth: 0
    depth = request.headers.getHeader("depth", "0")
    if depth != "0":
        log.error("Non-zero depth is not allowed: %s" % (depth,))
        raise HTTPError(StatusResponse(responsecode.BAD_REQUEST, "Depth %s not allowed" % (depth,)))

    #
    # Get top level properties to expand and make sure we only have one level
    #
    # Maps (namespace, name) of each top-level property to the list of
    # (namespace, name) sub-properties to fetch on dereferenced hrefs.
    properties = {}

    for property in expand_property.children:
        namespace = property.attributes.get("namespace", dav_namespace)
        name = property.attributes.get("name", "")

        # Make sure children have no children
        props_to_find = []
        for child in property.children:
            if child.children:
                log.error("expand-property REPORT only supports single level expansion")
                raise HTTPError(StatusResponse(
                    responsecode.NOT_IMPLEMENTED,
                    "expand-property REPORT only supports single level expansion"
                ))
            child_namespace = child.attributes.get("namespace", dav_namespace)
            child_name = child.attributes.get("name", "")
            props_to_find.append((child_namespace, child_name))

        properties[(namespace, name)] = props_to_find

    #
    # Generate the expanded responses status for each top-level property
    #
    properties_by_status = {
        responsecode.OK : [],
        responsecode.NOT_FOUND : [],
    }

    filteredaces = None
    lastParent = None

    for qname in properties.iterkeys():
        try:
            prop = (yield self.readProperty(qname, request))

            # Form the PROPFIND-style DAV:prop element we need later
            props_to_return = element.PropertyContainer(*properties[qname])

            # Now dereference any HRefs
            responses = []
            for href in prop.children:
                if isinstance(href, element.HRef):

                    # Locate the Href resource and its parent
                    resource_uri = str(href)
                    child = (yield request.locateResource(resource_uri))

                    if not child or not child.exists():
                        responses.append(element.StatusResponse(href, element.Status.fromResponseCode(responsecode.NOT_FOUND)))
                        continue
                    parent = (yield request.locateResource(parentForURL(resource_uri)))

                    # Check privileges on parent - must have at least DAV:read
                    try:
                        yield parent.checkPrivileges(request, (element.Read(),))
                    except AccessDeniedError:
                        responses.append(element.StatusResponse(href, element.Status.fromResponseCode(responsecode.FORBIDDEN)))
                        continue

                    # Cache the last parent's inherited aces for checkPrivileges optimization
                    if lastParent != parent:
                        lastParent = parent

                        # Do some optimisation of access control calculation by determining any inherited ACLs outside of
                        # the child resource loop and supply those to the checkPrivileges on each child.
                        filteredaces = (yield parent.inheritedACEsforChildren(request))

                    # Check privileges - must have at least DAV:read
                    try:
                        yield child.checkPrivileges(request, (element.Read(),), inherited_aces=filteredaces)
                    except AccessDeniedError:
                        responses.append(element.StatusResponse(href, element.Status.fromResponseCode(responsecode.FORBIDDEN)))
                        continue

                    # Now retrieve all the requested properties on the HRef resource
                    yield prop_common.responseForHref(
                        request,
                        responses,
                        href,
                        child,
                        prop_common.propertyListForResource,
                        props_to_return,
                    )

            # Replace the raw hrefs with the per-href expanded responses.
            prop.children = responses
            properties_by_status[responsecode.OK].append(prop)
        except:
            # Property read failed: record a status entry for this property
            # instead of failing the whole report.
            f = Failure()

            log.error(
                "Error reading property {qname} for resource {req}: {failure}",
                qname=qname, req=request.uri, failure=f.value
            )

            status = statusForFailure(f, "getting property: %s" % (qname,))
            if status not in properties_by_status:
                properties_by_status[status] = []
            properties_by_status[status].append(propertyName(qname))

    # Build the overall response
    propstats = [
        element.PropertyStatus(
            element.PropertyContainer(*properties_by_status[pstatus]),
            element.Status.fromResponseCode(pstatus)
        )
        for pstatus in properties_by_status if properties_by_status[pstatus]
    ]

    returnValue(MultiStatusResponse((element.PropertyStatusResponse(element.HRef(request.uri), *propstats),)))
| {
"content_hash": "f78c7dd113e8deabdcd1a1880bccd192",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 127,
"avg_line_length": 40.20779220779221,
"alnum_prop": 0.6059431524547804,
"repo_name": "red-hood/calendarserver",
"id": "f38eb5e383cefa7db9162ceccf471027dd378425",
"size": "7375",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "txweb2/dav/method/report_expand.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1482"
},
{
"name": "CSS",
"bytes": "4214"
},
{
"name": "DIGITAL Command Language",
"bytes": "1234"
},
{
"name": "DTrace",
"bytes": "13143"
},
{
"name": "HTML",
"bytes": "36120"
},
{
"name": "JavaScript",
"bytes": "80248"
},
{
"name": "Makefile",
"bytes": "14429"
},
{
"name": "PLSQL",
"bytes": "12719"
},
{
"name": "PLpgSQL",
"bytes": "291431"
},
{
"name": "Python",
"bytes": "10537612"
},
{
"name": "R",
"bytes": "1091"
},
{
"name": "SQLPL",
"bytes": "6430"
},
{
"name": "Shell",
"bytes": "96975"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from apps.wordtrack.views import WordTrackView
# URL routes for the wordtrack app: the site root serves WordTrackView.
urlpatterns = [
    url(r'^$', WordTrackView.as_view(), name='home'),
]
| {
"content_hash": "c245175d8146562252c42cabbda0893f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 53,
"avg_line_length": 25.5,
"alnum_prop": 0.7189542483660131,
"repo_name": "husman/WoTrack",
"id": "7ff1a46880d2bd82e90ca6b7f4bdd850c631722d",
"size": "153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/wordtrack/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "549"
},
{
"name": "HTML",
"bytes": "2910"
},
{
"name": "Python",
"bytes": "16380"
}
],
"symlink_target": ""
} |
import datetime
import pytest
from anchore_engine.subsys import logger, simplequeue
from anchore_engine.subsys.logger import enable_test_logging
enable_test_logging()
singleton_queue = "testq1"
multi_queue = "testq2"
std_queue = "testq3"
@pytest.fixture
def test_qs(anchore_db):
    """
    Create the three test queues in a fresh database.

    Expects to initialize the queues in an empty db, so it uses the
    anchore_db fixture itself to get an initialized db.
    :return:
    """
    # NOTE: the queue-name globals are only read here, so the original
    # `global` declaration was a no-op and has been removed.
    simplequeue.create_queue(
        singleton_queue, max_outstanding_msgs=1, visibility_timeout=10
    )
    simplequeue.create_queue(multi_queue, max_outstanding_msgs=5, visibility_timeout=10)
    simplequeue.create_queue(std_queue, max_outstanding_msgs=0, visibility_timeout=0)
def test_std_queue(test_qs):
    """Exercise plain (non-singleton) queue behavior.

    Drains any queued messages with acks, then verifies that one enqueued
    message dequeues as a dict and a second dequeue returns empty.
    """
    logger.info("Testing standard queue")
    simplequeue.enqueue(std_queue, {"key1": "value1"})
    # Drain the queue, acking each message as it is received.
    while True:
        message = simplequeue.dequeue(std_queue)
        logger.info("Got msg: {}".format(message))
        if not message:
            break
        logger.info("Deleting msg {}".format(message))
        simplequeue.delete_msg(std_queue, message.get("receipt_handle"))

    simplequeue.enqueue(std_queue, {"key1": "value1"})
    message = simplequeue.dequeue(std_queue)
    logger.info("Got msg: {}".format(message))
    assert message is not None, "Got a None msg, expected a dict"
    # The queue is now empty again; dequeue yields an empty dict.
    message = simplequeue.dequeue(std_queue)
    logger.info("Got msg: {}".format(message))
    assert message == {}, "Expected an empty dict response, got {}".format(message)
def test_singleton_queues(test_qs):
    """Enqueue the same payload repeatedly into the singleton queue
    (max 1 outstanding message) and drain it with acks."""
    logger.info("Inserting")
    for _ in range(4):
        simplequeue.enqueue(singleton_queue, {"key1": "value1"})

    logger.info("Reading back")
    while True:
        msg = simplequeue.dequeue(singleton_queue)
        logger.info("Got msg: {}".format(msg))
        if not msg:
            break
        logger.info("Deleting msg {}".format(msg))
        simplequeue.delete_msg(singleton_queue, msg.get("receipt_handle"))
def test_multi_queues(test_qs):
    """Drain the 5-outstanding-message queue in batches.

    Dequeues until the queue stops returning messages (the outstanding
    limit), acks the collected batch, and repeats until the queue is
    truly empty.  Finally checks that deleting an unknown receipt handle
    returns a falsy value.
    """
    logger.info("Inserting")
    simplequeue.enqueue(multi_queue, {"key1": "value1"})
    simplequeue.enqueue(multi_queue, {"key2": "value2"})
    simplequeue.enqueue(multi_queue, {"key3": "value3"})
    simplequeue.enqueue(multi_queue, {"key4": "value4"})
    simplequeue.enqueue(multi_queue, {"key5": "value5"})
    logger.info("Reading back")
    counter = 0
    msgs = []
    # `True` is a loop-entry sentinel; real messages are dicts.
    msg = True
    while msg:
        logger.info("Got msg: {}".format(msg))
        msg = simplequeue.dequeue(multi_queue)
        if not msg:
            logger.info("No msg received")
            logger.info("Counter = {}".format(counter))
            if counter > 0 and len(msgs) > 0:
                # Ack the batch we collected, then reset and keep draining.
                for m in msgs:
                    logger.info("Deleting msg {}".format(m))
                    simplequeue.delete_msg(multi_queue, m.get("receipt_handle"))
                msgs = []
                counter = 0
            else:
                # Nothing collected and nothing received: queue is empty.
                break
            # Re-arm the sentinel to force another pass after acking.
            msg = True
        else:
            msgs.append(msg)
            counter += 1

    # Deleting an unknown receipt handle must not succeed.
    resp = simplequeue.delete_msg(multi_queue, "blah")
    assert not resp, "Expected a false/None response, got: {}".format(resp)
def test_visibility_timeout(test_qs):
    """Dequeue with a short visibility timeout, verify the computed
    visible_at timestamp, then extend the timeout and clean up."""
    simplequeue.enqueue(multi_queue, {"key00001": "value0001"})
    msg = simplequeue.dequeue(multi_queue, visibility_timeout=5)

    # visible_at should land within ~1 second of now + the 5s timeout.
    upper_bound = datetime.datetime.utcnow() + datetime.timedelta(seconds=5)
    lower_bound = upper_bound - datetime.timedelta(seconds=1)
    assert (
        lower_bound <= msg["visible_at"] <= upper_bound
    ), "Msg visible time, {}, outside expected range {} - {}".format(
        msg.get("visible_at"), lower_bound, upper_bound
    )

    logger.info("Updating timeout: {}".format(msg["visible_at"].isoformat()))
    new_ts = simplequeue.update_visibility_timeout(
        multi_queue, receipt_handle=msg["receipt_handle"], visibility_timeout=20
    )
    logger.info("Updated timeout: {}".format(new_ts))
    assert (
        new_ts is not None
    ), "Expected a non-None value, got None for updated timeout after viz timeout update"

    simplequeue.delete_msg(multi_queue, receipt_handle=msg["receipt_handle"])
| {
"content_hash": "22e1c49c8e048780311028cc30e7dd08",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 115,
"avg_line_length": 33.503649635036496,
"alnum_prop": 0.642483660130719,
"repo_name": "anchore/anchore-engine",
"id": "7b09b2780a330651d867d59ef80f90b115907e5a",
"size": "4590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/subsys/test_simplequeue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3889"
},
{
"name": "Dockerfile",
"bytes": "10954"
},
{
"name": "Makefile",
"bytes": "12274"
},
{
"name": "Python",
"bytes": "4529553"
},
{
"name": "Shell",
"bytes": "16598"
}
],
"symlink_target": ""
} |
"""
stats.py module
(Requires pstat.py module.)
#################################################
####### Written by: Gary Strangman ###########
####### Last modified: Dec 18, 2007 ###########
#################################################
A collection of basic statistical functions for python. The function
names appear below.
IMPORTANT: There are really *3* sets of functions. The first set has an 'l'
prefix, which can be used with list or tuple arguments. The second set has
an 'a' prefix, which can accept NumPy array arguments. These latter
functions are defined only when NumPy is available on the system. The third
type has NO prefix (i.e., has the name that appears below). Functions of
this set are members of a "Dispatch" class, c/o David Ascher. This class
allows different functions to be called depending on the type of the passed
arguments. Thus, stats.mean is a member of the Dispatch class and
stats.mean(range(20)) will call stats.lmean(range(20)) while
stats.mean(Numeric.arange(20)) will call stats.amean(Numeric.arange(20)).
This is a handy way to keep consistent function names when different
argument types require different functions to be called. Having
implemented the Dispatch class, however, means that to get info on
a given function, you must use the REAL function name ... that is
"print stats.lmean.__doc__" or "print stats.amean.__doc__" work fine,
while "print stats.mean.__doc__" will print the doc for the Dispatch
class. NUMPY FUNCTIONS ('a' prefix) generally have more argument options
but should otherwise be consistent with the corresponding list functions.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful
CENTRAL TENDENCY: geometricmean
harmonicmean
mean
median
medianscore
mode
MOMENTS: moment
variation
skew
kurtosis
skewtest (for Numpy arrays only)
kurtosistest (for Numpy arrays only)
normaltest (for Numpy arrays only)
ALTERED VERSIONS: tmean (for Numpy arrays only)
tvar (for Numpy arrays only)
tmin (for Numpy arrays only)
tmax (for Numpy arrays only)
tstdev (for Numpy arrays only)
tsem (for Numpy arrays only)
describe
FREQUENCY STATS: itemfreq
scoreatpercentile
percentileofscore
histogram
cumfreq
relfreq
VARIABILITY: obrientransform
samplevar
samplestdev
signaltonoise (for Numpy arrays only)
var
stdev
sterr
sem
z
zs
zmap (for Numpy arrays only)
TRIMMING FCNS: threshold (for Numpy arrays only)
trimboth
trim1
round (round all vals to 'n' decimals; Numpy only)
CORRELATION FCNS: covariance (for Numpy arrays only)
correlation (for Numpy arrays only)
paired
pearsonr
spearmanr
pointbiserialr
kendalltau
linregress
INFERENTIAL STATS: ttest_1samp
ttest_ind
ttest_rel
chisquare
ks_2samp
mannwhitneyu
ranksums
wilcoxont
kruskalwallish
friedmanchisquare
PROBABILITY CALCS: chisqprob
erfcc
zprob
ksprob
fprob
betacf
gammln
betai
ANOVA FUNCTIONS: F_oneway
F_value
SUPPORT FUNCTIONS: writecc
incr
sign (for Numpy arrays only)
sum
cumsum
ss
summult
sumdiffsquared
square_of_sums
shellsort
rankdata
outputpairedstats
findwithin
"""
## CHANGE LOG:
## ===========
## 07-11.26 ... conversion for numpy started
## 07-05-16 ... added Lin's Concordance Correlation Coefficient (alincc) and acov
## 05-08-21 ... added "Dice's coefficient"
## 04-10-26 ... added ap2t(), an ugly fcn for converting p-vals to T-vals
## 04-04-03 ... added amasslinregress() function to do regression on N-D arrays
## 03-01-03 ... CHANGED VERSION TO 0.6
## fixed atsem() to properly handle limits=None case
## improved histogram and median functions (estbinwidth) and
## fixed atvar() function (wrong answers for neg numbers?!?)
## 02-11-19 ... fixed attest_ind and attest_rel for div-by-zero Overflows
## 02-05-10 ... fixed lchisqprob indentation (failed when df=even)
# match Python License, fixed doc string & imports
## 00-04-13 ... pulled all "global" statements, except from aanova()
## added/fixed lots of documentation, removed io.py dependency
## changed to version 0.5
## 99-11-13 ... added asign() function
## 99-11-01 ... changed version to 0.4 ... enough incremental changes now
## 99-10-25 ... added acovariance and acorrelation functions
## 99-10-10 ... fixed askew/akurtosis to avoid divide-by-zero errors
## added aglm function (crude, but will be improved)
## 99-10-04 ... upgraded acumsum, ass, asummult, asamplevar, avar, etc. to
## all handle lists of 'dimension's and keepdims
## REMOVED ar0, ar2, ar3, ar4 and replaced them with around
## reinserted fixes for abetai to avoid math overflows
## 99-09-05 ... rewrote achisqprob/aerfcc/aksprob/afprob/abetacf/abetai to
## handle multi-dimensional arrays (whew!)
## 99-08-30 ... fixed l/amoment, l/askew, l/akurtosis per D'Agostino (1990)
## added anormaltest per same reference
## re-wrote azprob to calc arrays of probs all at once
## 99-08-22 ... edited attest_ind printing section so arrays could be rounded
## 99-08-19 ... fixed amean and aharmonicmean for non-error(!) overflow on
## short/byte arrays (mean of #s btw 100-300 = -150??)
## 99-08-09 ... fixed asum so that the None case works for Byte arrays
## 99-08-08 ... fixed 7/3 'improvement' to handle t-calcs on N-D arrays
## 99-07-03 ... improved attest_ind, attest_rel (zero-division errortrap)
## 99-06-24 ... fixed bug(?) in attest_ind (n1=a.shape[0])
## 04/11/99 ... added asignaltonoise, athreshold functions, changed all
## max/min in array section to N.maximum/N.minimum,
## fixed square_of_sums to prevent integer overflow
## 04/10/99 ... !!! Changed function name ... sumsquared ==> square_of_sums
## 03/18/99 ... Added ar0, ar2, ar3 and ar4 rounding functions
## 02/28/99 ... Fixed aobrientransform to return an array rather than a list
## 01/15/99 ... Essentially ceased updating list-versions of functions (!!!)
## 01/13/99 ... CHANGED TO VERSION 0.3
## fixed bug in a/lmannwhitneyu p-value calculation
## 12/31/98 ... fixed variable-name bug in ldescribe
## 12/19/98 ... fixed bug in findwithin (fcns needed pstat. prefix)
## 12/16/98 ... changed amedianscore to return float (not array) for 1 score
## 12/14/98 ... added atmin and atmax functions
## removed umath from import line (not needed)
## l/ageometricmean modified to reduce chance of overflows (take
## nth root first, then multiply)
## 12/07/98 ... added __version__variable (now 0.2)
## removed all 'stats.' from anova() fcn
## 12/06/98 ... changed those functions (except shellsort) that altered
## arguments in-place ... cumsum, ranksort, ...
## updated (and fixed some) doc-strings
## 12/01/98 ... added anova() function (requires NumPy)
## incorporated Dispatch class
## 11/12/98 ... added functionality to amean, aharmonicmean, ageometricmean
## added 'asum' function (added functionality to N.add.reduce)
## fixed both moment and amoment (two errors)
## changed name of skewness and askewness to skew and askew
## fixed (a)histogram (which sometimes counted points <lowerlimit)
import pstat # required 3rd party module
import math, string, copy # required python modules
from types import *
__version__ = 0.6
############# DISPATCH CODE ##############
class Dispatch:
    """
    The Dispatch class, care of David Ascher, allows different functions to
    be called depending on the argument types.  This way, there can be one
    function name regardless of the argument type.  To access function doc
    in stats.py module, prefix the function with an 'l' or 'a' for list or
    array arguments, respectively.  That is, print stats.lmean.__doc__ or
    print stats.amean.__doc__ or whatever.
    """

    def __init__(self, *tuples):
        # Map each registered argument type to its handler function.
        self._dispatch = {}
        for func, types in tuples:
            for t in types:
                if t in self._dispatch:
                    # Registering a type twice is a programming error.
                    # (exception-call syntax works on Python 2 AND 3,
                    # unlike the original 'raise ValueError, msg')
                    raise ValueError("can't have two dispatches on " + str(t))
                self._dispatch[t] = func
        self._types = self._dispatch.keys()

    def __call__(self, arg1, *args, **kw):
        # Route the call to the handler registered for type(arg1).
        if type(arg1) not in self._types:
            raise TypeError("don't know how to dispatch %s arguments" % type(arg1))
        # direct call replaces the long-deprecated apply() builtin
        return self._dispatch[type(arg1)](arg1, *args, **kw)
##########################################################################
######################## LIST-BASED FUNCTIONS ########################
##########################################################################
### Define these regardless
####################################
####### CENTRAL TENDENCY #########
####################################
def lgeometricmean (inlist):
    """
    Calculates the geometric mean of the values in the passed list.
    That is:  n-th root of (x1 * x2 * ... * xn).  Assumes a '1D' list.

    Usage:   lgeometricmean(inlist)
    """
    # Multiply the n-th roots together (rather than taking the n-th root
    # of the full product), which lowers the chance of float overflow.
    exponent = 1.0 / len(inlist)
    product = 1.0
    for value in inlist:
        product *= pow(value, exponent)
    return product
def lharmonicmean (inlist):
    """
    Calculates the harmonic mean of the values in the passed list.
    That is:  n / (1/x1 + 1/x2 + ... + 1/xn).  Assumes a '1D' list.

    Usage:   lharmonicmean(inlist)
    """
    # NOTE(review): the 'def' line and docstring opener were missing at this
    # point in the file (apparently lost); restored from the docstring's own
    # Usage line and the module's l-prefix convention -- verify against a
    # pristine copy of stats.py.
    recip_sum = 0.0  # renamed from 'sum' so the builtin is not shadowed
    for item in inlist:
        recip_sum = recip_sum + 1.0/item
    return len(inlist) / recip_sum
def lmean (inlist):
    """
    Returns the arithmetic mean of the values in the passed list.
    Assumes a '1D' list, but will function on the 1st dim of an array(!).

    Usage:   lmean(inlist)
    """
    # Accumulate manually (module-level sum() is rebound elsewhere in
    # this file, so the plain loop keeps this function self-contained).
    total = 0
    for value in inlist:
        total = total + value
    return total / float(len(inlist))
def lmedian (inlist,numbins=1000):
    """
    Returns the computed median value of a list of numbers, given the
    number of bins to use for the histogram (more bins brings the computed value
    closer to the median score, default number of bins = 1000).  See G.W.
    Heiman's Basic Stats (1st Edition), or CRC Probability & Statistics.

    Usage:   lmedian (inlist, numbins=1000)
    """
    # bin the data over its full observed range
    (hist, smallest, binsize, extras) = histogram(inlist,numbins,[min(inlist),max(inlist)]) # make histog
    cumhist = cumsum(hist)              # make cumulative histogram
    for i in range(len(cumhist)):        # get 1st(!) index holding 50%ile score
        if cumhist[i]>=len(inlist)/2.0:
            cfbin = i
            break
    LRL = smallest + binsize*cfbin        # get lower read limit of that bin
    cfbelow = cumhist[cfbin-1]            # cumulative count below the median bin
    freq = float(hist[cfbin])                # frequency IN the 50%ile bin
    # interpolate linearly within the bin that straddles the 50th percentile
    median = LRL + ((len(inlist)/2.0 - cfbelow)/float(freq))*binsize  # median formula
    return median
def lmedianscore (inlist):
    """
    Returns the 'middle' score of the passed list.  If there is an even
    number of scores, the mean of the 2 middle scores is returned.

    Usage:   lmedianscore(inlist)
    """
    newlist = copy.deepcopy(inlist)  # sort a copy; leave caller's list alone
    newlist.sort()
    if len(newlist) % 2 == 0:   # if even number of scores, average middle 2
        # Explicit floor division keeps the index an int on Python 3 as
        # well ('/' on ints yields a float there); identical on Python 2.
        index = len(newlist) // 2
        median = float(newlist[index] + newlist[index-1]) / 2
    else:
        index = len(newlist) // 2  # floor division gives mid value when counting from 0
        median = newlist[index]
    return median
def lmode(inlist):
    """
    Returns a list of the modal (most common) score(s) in the passed
    list.  If there is more than one such score, all are returned.  The
    bin-count for the mode(s) is also returned.

    Usage:   lmode(inlist)
    Returns: bin-count for mode(s), a list of modal value(s)
    """
    # Tally the frequency of each distinct score, in ascending order.
    scores = pstat.unique(inlist)
    scores.sort()
    counts = [inlist.count(score) for score in scores]
    maxfreq = max(counts)
    # Every score whose count equals the maximum is a mode; because
    # 'scores' is sorted, the modes come back in ascending order too.
    mode = [scores[i] for i in range(len(scores)) if counts[i] == maxfreq]
    return maxfreq, mode
####################################
############ MOMENTS #############
####################################
def lmoment(inlist,moment=1):
    """
    Calculates the nth moment about the mean for a sample (defaults to
    the 1st moment).  Used to calculate coefficients of skewness and kurtosis.

    Usage:   lmoment(inlist,moment=1)
    Returns: appropriate moment (r) from ... 1/n * SUM((inlist(i)-mean)**r)
    """
    # The first moment about the mean is zero by definition.
    if moment == 1:
        return 0.0
    mn = mean(inlist)
    total = 0
    for score in inlist:
        total = total + (score - mn)**moment
    return total / float(len(inlist))
def lvariation(inlist):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6:  100 * sample stdev / mean.

    Usage:   lvariation(inlist)
    """
    # N-denominator stdev scaled by the mean, expressed as a percentage
    return 100.0*samplestdev(inlist)/float(mean(inlist))
def lskew(inlist):
    """
    Returns the skewness of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lskew(inlist)
    """
    # 3rd central moment normalized by the 2nd moment to the 3/2 power
    return moment(inlist,3)/pow(moment(inlist,2),1.5)
def lkurtosis(inlist):
    """
    Returns the kurtosis of a distribution, as defined in Numerical
    Recipes (alternate defn in CRC Standard Probability and Statistics, p.6.)

    Usage:   lkurtosis(inlist)
    """
    # 4th central moment over the squared 2nd moment (note: no -3
    # 'excess kurtosis' correction is applied here)
    return moment(inlist,4)/pow(moment(inlist,2),2.0)
def ldescribe(inlist):
    """
    Returns some descriptive statistics of the passed list (assumed to be 1D).

    Usage:   ldescribe(inlist)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    n = len(inlist)
    mm = (min(inlist),max(inlist))   # range as a (min, max) tuple
    m = mean(inlist)
    sd = stdev(inlist)               # N-1 denominator stdev
    sk = skew(inlist)
    kurt = kurtosis(inlist)
    return n, mm, m, sd, sk, kurt
####################################
####### FREQUENCY STATS ##########
####################################
def litemfreq(inlist):
    """
    Returns a list of pairs.  Each pair consists of one of the scores in inlist
    and its frequency count.  Assumes a 1D list is passed.

    Usage:   litemfreq(inlist)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = pstat.unique(inlist)
    scores.sort()
    freq = []
    for item in scores:
        freq.append(inlist.count(item))   # one O(n) count per distinct score
    return pstat.abut(scores, freq)       # glue scores & counts column-wise
def lscoreatpercentile (inlist, percent):
    """
    Returns the score at a given percentile relative to the distribution
    given by inlist.  Accepts percent either as a proportion (0-1) or as a
    percentage (>1, silently divided by 100 with a console warning).

    Usage:   lscoreatpercentile(inlist,percent)
    """
    if percent > 1:
        print "\nDividing percent>1 by 100 in lscoreatpercentile().\n"
        percent = percent / 100.0
    targetcf = percent*len(inlist)          # target cumulative frequency
    h, lrl, binsize, extras = histogram(inlist)
    cumhist = cumsum(copy.deepcopy(h))
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:          # first bin that reaches the target
            break
    # linear interpolation within the located bin
    score = binsize * ((targetcf - cumhist[i-1]) / float(h[i])) + (lrl+binsize*i)
    return score
def lpercentileofscore (inlist, score,histbins=10,defaultlimits=None):
    """
    Returns the percentile value of a score relative to the distribution
    given by inlist.  Formula depends on the values used to histogram the data(!).

    Usage:   lpercentileofscore(inlist,score,histbins=10,defaultlimits=None)
    """
    h, lrl, binsize, extras = histogram(inlist,histbins,defaultlimits)
    cumhist = cumsum(copy.deepcopy(h))
    i = int((score - lrl)/float(binsize))   # index of the bin holding 'score'
    # count of scores below that bin, plus a linearly-interpolated share
    # of the bin itself, expressed as a percentage of N
    pct = (cumhist[i-1]+((score-(lrl+binsize*i))/float(binsize))*h[i])/float(len(inlist)) * 100
    return pct
def lhistogram (inlist,numbins=10,defaultreallimits=None,printextras=0):
    """
    Returns (i) a list of histogram bin counts, (ii) the smallest value
    of the histogram binning, and (iii) the bin width (the last 2 are not
    necessarily integers).  Default number of bins is 10.  If no sequence object
    is given for defaultreallimits, the routine picks (usually non-pretty) bins
    spanning all the numbers in the inlist.

    Usage:   lhistogram (inlist, numbins=10, defaultreallimits=None, printextras=0)
    Returns: list of bin values, lowerreallimit, binsize, extrapoints
    """
    # '!='/'is not None' and isinstance() replace the Python-2-only '<>'
    # operator and the ListType/TupleType constants from 'types'; behavior
    # is identical under Python 2.
    if defaultreallimits is not None:
        if not isinstance(defaultreallimits, (list, tuple)) or len(defaultreallimits) == 1:
            # only one limit given, assumed to be lower one & upper is calc'd
            lowerreallimit = defaultreallimits
            upperreallimit = 1.000001 * max(inlist)
        else:   # assume both limits given
            lowerreallimit = defaultreallimits[0]
            upperreallimit = defaultreallimits[1]
        binsize = (upperreallimit-lowerreallimit)/float(numbins)
    else:   # no limits given for histogram, both must be calc'd
        estbinwidth = (max(inlist)-min(inlist))/float(numbins) + 1e-6   # 1=>cover all
        binsize = ((max(inlist)-min(inlist)+estbinwidth))/float(numbins)
        lowerreallimit = min(inlist) - binsize/2   # lower real limit, 1st bin
    bins = [0]*(numbins)
    extrapoints = 0
    for num in inlist:
        try:
            if (num-lowerreallimit) < 0:
                extrapoints = extrapoints + 1
            else:
                bintoincrement = int((num-lowerreallimit)/float(binsize))
                bins[bintoincrement] = bins[bintoincrement] + 1
        except Exception:
            # points above the top bin raise IndexError and are counted
            # as extras (narrowed from the original bare 'except')
            extrapoints = extrapoints + 1
    if (extrapoints > 0 and printextras == 1):
        print('\nPoints outside given histogram range = %s' % extrapoints)
    return (bins, lowerreallimit, binsize, extrapoints)
def lcumfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.

    Usage:   lcumfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    h,l,b,e = histogram(inlist,numbins,defaultreallimits)
    # run cumsum on a copy so the raw bin counts are not altered in place
    cumhist = cumsum(copy.deepcopy(h))
    return cumhist,l,b,e
def lrelfreq(inlist,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.

    Usage:   lrelfreq(inlist,numbins=10,defaultreallimits=None)
    Returns: list of relfreq bin values, lowerreallimit, binsize, extrapoints
    """
    h,l,b,e = histogram(inlist,numbins,defaultreallimits)
    for i in range(len(h)):
        h[i] = h[i]/float(len(inlist))   # convert each count to a proportion of N
    return h,l,b,e
####################################
##### VARIABILITY FUNCTIONS ######
####################################
def lobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  From
    Maxwell and Delaney, p.112.

    Usage:   lobrientransform(*args)   each arg is a list of scores for one group
    Returns: transformed data for use in an ANOVA
    """
    TINY = 1e-10
    k = len(args)       # number of groups
    n = [0.0]*k         # per-group counts (kept as floats for the math below)
    v = [0.0]*k         # per-group variances (N-1 denominator)
    m = [0.0]*k         # per-group means
    nargs = []
    for i in range(k):
        nargs.append(copy.deepcopy(args[i]))   # transform copies, not caller data
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # range() requires an int argument (a float here fails on Python 3
        # and is deprecated on Python 2)
        for i in range(int(n[j])):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # Sanity check: the mean of each transformed column should equal the
    # variance of the original column (within rounding).
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('Problem in obrientransform.')
    else:
        return nargs
def lsamplevar (inlist):
    """
    Returns the variance of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample variance only).

    Usage:   lsamplevar(inlist)
    """
    n = len(inlist)
    mn = mean(inlist)
    deviations = []
    for item in inlist:
        deviations.append(item-mn)
    # ss() = sum of squared deviations; divide by N (not N-1)
    return ss(deviations)/float(n)
def lsamplestdev (inlist):
    """
    Returns the standard deviation of the values in the passed list using
    N for the denominator (i.e., DESCRIBES the sample stdev only).

    Usage:   lsamplestdev(inlist)
    """
    # square root of the N-denominator variance
    return math.sqrt(samplevar(inlist))
def lcov (x,y, keepdims=0):
    """
    Returns the estimated covariance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:    lcov(x,y,keepdims=0)
    """
    # (keepdims is accepted only for signature parity with the array
    # version; the list implementation never consults it.)
    n = len(x)
    xmn = mean(x)
    ymn = mean(y)
    # Accumulate the sum of products of paired deviations directly,
    # without building intermediate deviation lists.
    total = 0.0
    for i in range(len(x)):
        total = total + (x[i] - xmn) * (y[i] - ymn)
    return total / float(n - 1)
def lvar (inlist):
    """
    Returns the variance of the values in the passed list using N-1
    for the denominator (i.e., for estimating population variance).

    Usage:   lvar(inlist)
    """
    n = len(inlist)
    mn = mean(inlist)
    deviations = [0]*len(inlist)
    for i in range(len(inlist)):
        deviations[i] = inlist[i] - mn
    # ss() = sum of squared deviations; N-1 gives the unbiased estimator
    return ss(deviations)/float(n-1)
def lstdev (inlist):
    """
    Returns the standard deviation of the values in the passed list
    using N-1 in the denominator (i.e., to estimate population stdev).

    Usage:   lstdev(inlist)
    """
    # square root of the unbiased (N-1) variance
    return math.sqrt(var(inlist))
def lsterr(inlist):
    """
    Returns the standard error of the values in the passed list using N-1
    in the denominator (i.e., to estimate population standard error).

    Usage:   lsterr(inlist)
    """
    # N-1 denominator stdev divided by sqrt(N)
    return stdev(inlist) / float(math.sqrt(len(inlist)))
def lsem (inlist):
    """
    Returns the estimated standard error of the mean (sx-bar) of the
    values in the passed list.  sem = stdev / sqrt(n)

    Usage:   lsem(inlist)
    """
    sd = stdev(inlist)   # N-1 denominator stdev
    n = len(inlist)
    return sd/math.sqrt(n)
def lz (inlist, score):
    """
    Returns the z-score for a given input score, given that score and the
    list from which that score came.  Not appropriate for population calculations.

    Usage:   lz(inlist, score)
    """
    # deviation from the list mean, scaled by the N-denominator stdev
    z = (score-mean(inlist))/samplestdev(inlist)
    return z
def lzs (inlist):
    """
    Returns a list of z-scores, one for each score in the passed list.

    Usage:   lzs(inlist)
    """
    zscores = []
    for item in inlist:
        # NOTE(review): z() presumably recomputes the mean/stdev on every
        # call, making this O(n**2) overall -- fine for typical list sizes.
        zscores.append(z(inlist,item))
    return zscores
####################################
####### TRIMMING FUNCTIONS #######
####################################
def ltrimboth (l,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    list (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND 'rightmost'
    10% of scores.  Assumes list is sorted by magnitude.  Slices off LESS if
    proportion results in a non-integer slice index (i.e., conservatively
    slices off proportiontocut).

    Usage:   ltrimboth (l,proportiontocut)
    Returns: trimmed version of list l
    """
    # int() truncates toward zero, so we never cut more than requested
    cut = int(proportiontocut * len(l))
    return l[cut:len(l) - cut]
def ltrim1 (l,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    list (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   ltrim1 (l,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of list l
    """
    # int() truncates toward zero: the conservative cut
    cut = int(proportiontocut * len(l))
    if tail == 'right':
        lowercut, uppercut = 0, len(l) - cut
    elif tail == 'left':
        lowercut, uppercut = cut, len(l)
    return l[lowercut:uppercut]
####################################
##### CORRELATION FUNCTIONS ######
####################################
def lpaired(x,y):
    """
    Interactively determines the type of data and then runs the
    appropriated statistic for paired group data.  Prompts on the console
    (raw_input) and prints the chosen test's results; always returns None.

    Usage:   lpaired(x,y)
    Returns: appropriate statistic name, value, and probability
    """
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print '\nIndependent or related samples, or correlation (i,r,c): ',
        samples = raw_input()

    if samples in ['i','I','r','R']:
        print '\nComparing variances ...',
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print vartype
        if samples in ['i','I']:
            if vartype[0]=='e':
                # equal variances: parametric t-test
                t,p = ttest_ind(x,y,0)
                print '\nIndependent samples t-test:  ', round(t,4),round(p,4)
            else:
                # unequal variances: fall back to nonparametric tests
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print '\nRank Sums test (NONparametric, n>20):  ', round(z,4),round(p,4)
                else:
                    u,p = mannwhitneyu(x,y)
                    print '\nMann-Whitney U-test (NONparametric, ns<20):  ', round(u,4),round(p,4)

        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print '\nRelated samples t-test:  ', round(t,4),round(p,4)
            else:
                # NOTE(review): this branch calls ranksums() but labels the
                # output as a Wilcoxon T-test; wilcoxont() may have been
                # intended -- verify before relying on this path.
                t,p = ranksums(x,y)
                print '\nWilcoxon T-test (NONparametric):  ', round(t,4),round(p,4)
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
            corrtype = raw_input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print '\nLinear regression for continuous variables ...'
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print '\nCorrelation for ranked variables ...'
            print "Spearman's r: ",round(r,4),round(p,4)
        else:  # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print '\nAssuming x contains a dichotomous variable ...'
            print 'Point Biserial r: ',round(r,4),round(p,4)
    print '\n\n'
    return None
def lpearsonr(x,y):
    """
    Calculates a Pearson correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (2nd), p.195.

    Usage:   lpearsonr(x,y)      where x and y are equal-length lists
    Returns: Pearson's r value, two-tailed p-value
    """
    TINY = 1.0e-30   # guards the t denominator when |r| is ~1
    if len(x) != len(y):   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('Input values not paired in pearsonr.  Aborting.')
    n = len(x)
    # Materialize float lists (a comprehension, unlike Python 3's lazy
    # map(), can be traversed more than once below).
    x = [float(value) for value in x]
    y = [float(value) for value in y]
    # (the original also computed xmean/ymean here but never used them)
    r_num = n*(summult(x,y)) - sum(x)*sum(y)
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = (r_num / r_den)  # denominator already a float
    # Convert r to a t statistic with df=n-2, then to a two-tailed p-value.
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/float(df+t*t))
    return r, prob
def llincc(x,y):
    """
    Calculates Lin's concordance correlation coefficient.

    Usage:   llincc(x,y)    where x, y are equal-length lists
    Returns: Lin's CC
    """
    covar = lcov(x,y)*(len(x)-1)/float(len(x))  # correct denom to n
    xvar = lvar(x)*(len(x)-1)/float(len(x))  # correct denom to n
    yvar = lvar(y)*(len(y)-1)/float(len(y))  # correct denom to n
    # BUGFIX: the original called amean() (the NumPy-array version) on
    # plain lists inside this list-based function; use the list version.
    lincc = (2 * covar) / ((xvar+yvar) +((lmean(x)-lmean(y))**2))
    return lincc
def lspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   lspearmanr(x,y)      where x and y are equal-length lists
    Returns: Spearman's r, two-tailed p-value
    """
    # (an unused TINY constant from the original was removed)
    if len(x) != len(y):   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('Input values not paired in spearmanr.  Aborting.')
    n = len(x)
    rankx = rankdata(x)
    ranky = rankdata(y)
    dsq = sumdiffsquared(rankx,ranky)
    # classic Spearman formula: 1 - 6*sum(d^2) / (n*(n^2-1))
    rs = 1 - 6*dsq / float(n*(n**2-1))
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    # probability values for rs are from part 2 of the spearman function in
    # Numerical Recipes, p.510.  They are close to tables, but not exact. (?)
    return rs, probrs
def lpointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.  x must contain exactly 2 distinct categories.

    Usage:   lpointbiserialr(x,y)    where x,y are equal-length lists
    Returns: Point-biserial r, two-tailed p-value
    """
    TINY = 1e-30   # guards the t denominator when |r| is ~1
    if len(x) != len(y):   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('INPUT VALUES NOT PAIRED IN pointbiserialr.  ABORTING.')
    data = pstat.abut(x,y)
    categories = pstat.unique(x)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required for pointbiserialr().")
    # there are 2 categories: split the rows by their x-category.  (The
    # original also built a recoded copy via pstat.recode that was never
    # used; it has been dropped.)
    x = pstat.linexand(data,0,categories[0])
    y = pstat.linexand(data,0,categories[1])
    xmean = mean(pstat.colex(x,1))   # mean y-score within category 0
    ymean = mean(pstat.colex(y,1))   # mean y-score within category 1
    n = len(data)
    adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
    rpb = (ymean - xmean)/samplestdev(pstat.colex(data,1))*adjust
    df = n-2
    t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))  # t already a float
    return rpb, prob
def lkendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipes.  Needs good test-routine.@@@

    Usage:   lkendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0    # count of pairs whose x-values differ
    n2 = 0    # count of pairs whose y-values differ (or both tie)
    iss = 0   # concordant-minus-discordant tally
    # NOTE(review): the inner loop starts at k=j (not j+1), so every (j,j)
    # self-pair has a1==a2==0 and falls through to n2+=1; Numerical Recipes'
    # kendl1 iterates k=j+1..n-1 -- confirm this offset is intended.
    for j in range(len(x)-1):
        for k in range(j,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):             # neither list has a tie
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:
                    iss = iss + 1
                else:
                    iss = iss -1
            else:
                if (a1):
                    n1 = n1 + 1
                else:
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    # normal approximation for the significance of tau
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)   # 1.4142136 ~= sqrt(2): two-tailed prob
    return tau, prob
def llinregress(x,y):
    """
    Calculates a regression line on x,y pairs.

    Usage:   llinregress(x,y)      x,y are equal-length lists of x-y coordinates
    Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
    """
    TINY = 1.0e-20   # guards the t denominator when |r| is ~1
    if len(x) != len(y):   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('Input values not paired in linregress.  Aborting.')
    n = len(x)
    # Materialize float lists (a comprehension, unlike Python 3's lazy
    # map(), can be traversed more than once below).
    x = [float(value) for value in x]
    y = [float(value) for value in y]
    xmean = mean(x)
    ymean = mean(y)
    r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
    r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
    r = r_num / r_den
    # (the original also computed Fisher's z here but never used it; dropped)
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = betai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / float(n*ss(x) - square_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*samplestdev(y)   # std error of the estimate
    return slope, intercept, r, prob, sterrest
####################################
##### INFERENTIAL STATISTICS #####
####################################
def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a')
    Returns: t-value, two-tailed prob
    """
    x = mean(a)
    v = var(a)     # unbiased (N-1) variance
    n = len(a)
    df = n-1
    svar = ((n-1)*v)/float(df)   # equals v here; kept for parity with the source text
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = betai(0.5*df,0.5,float(df)/(df+t*t))
    if printit != 0:   # '!=' replaces the Python-2-only '<>' operator
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,x,v,min(a),max(a),
                          statname,t,prob)
    return t,prob
def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of
    scores a, and b.  From Numerical Recipes, p.483.  If printit=1, results
    are printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_ind(a,b,printit=0,name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    x1 = mean(a)
    x2 = mean(b)
    v1 = stdev(a)**2   # unbiased (N-1) variances
    v2 = stdev(b)**2
    n1 = len(a)
    n2 = len(b)
    df = n1+n2-2
    svar = ((n1-1)*v1+(n2-1)*v2)/float(df)   # pooled variance
    t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))
    prob = betai(0.5*df,0.5,df/(df+t*t))
    if printit != 0:   # '!=' replaces the Python-2-only '<>' operator
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,x1,v1,min(a),max(a),
                          name2,n2,x2,v2,min(b),max(b),
                          statname,t,prob)
    return t,prob
def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores,
    a and b.  From Numerical Recipes, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output to
    'filename' using the given writemode (default=append).  Returns t-value,
    and prob.

    Usage:   lttest_rel(a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a')
    Returns: t-value, two-tailed prob
    """
    if len(a) != len(b):   # '!=' replaces the Python-2-only '<>' operator
        raise ValueError('Unequal length lists in ttest_rel.')
    x1 = mean(a)
    x2 = mean(b)
    v1 = var(a)
    v2 = var(b)
    n = len(a)
    cov = 0
    for i in range(len(a)):
        cov = cov + (a[i]-x1) * (b[i]-x2)
    df = n-1
    cov = cov / float(df)                        # sample covariance of the pairs
    sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))   # stderr of the mean difference
    t = (x1-x2)/sd
    prob = betai(0.5*df,0.5,df/(df+t*t))
    if printit != 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,x1,v1,min(a),max(a),
                          name2,n,x2,v2,min(b),max(b),
                          statname,t,prob)
    return t, prob
def lchisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for list of observed frequencies and returns
    the result.  If no expected frequencies are given, the total N is assumed to
    be equally distributed across all groups.

    Usage:   lchisquare(f_obs, f_exp=None)   f_obs = list of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)   # number of groups
    if f_exp is None:   # 'is None' (identity) rather than '== None'
        f_exp = [sum(f_obs)/float(k)] * len(f_obs)   # create k bins with = freq.
    chisq = 0
    for i in range(len(f_obs)):
        chisq = chisq + (f_obs[i]-f_exp[i])**2 / float(f_exp[i])
    return chisq, chisqprob(chisq, k-1)
def lks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnov statistic on 2 samples.  From
    Numerical Recipes in C, page 493.  NOTE: sorts BOTH input lists in
    place as a side effect.

    Usage:   lks_2samp(data1,data2)   data1&2 are lists of values for 2 conditions
    Returns: KS D-value, associated p-value
    """
    j1 = 0
    j2 = 0
    fn1 = 0.0    # running empirical CDF value for sample 1
    fn2 = 0.0    # running empirical CDF value for sample 2
    n1 = len(data1)
    n2 = len(data2)
    en1 = n1
    en2 = n2
    d = 0.0      # largest (signed) CDF gap seen so far
    data1.sort()   # in place: mutates the caller's lists
    data2.sort()
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if math.fabs(dt) > math.fabs(d):   # track the largest CDF gap
            d = dt
    try:
        # asymptotic significance of D (NR eq.); any numeric failure
        # (e.g. overflow) falls back to p=1.0 via the bare except below
        en = math.sqrt(en1*en2/float(en1+en2))
        prob = ksprob((en+0.12+0.11/en)*abs(d))
    except:
        prob = 1.0
    return d, prob
def lmannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  NOTE: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U found in the tables.  Equivalent to Kruskal-Wallis H with
    just 2 groups.

    Usage:   lmannwhitneyu(data)
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(x+y)
    rankx = ranked[0:n1]                        # get the x-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)   # calc U for x
    u2 = n1*n2 - u1                             # remainder is U for y
    bigu = max(u1,u2)
    smallu = min(u1,u2)
    T = math.sqrt(tiecorrect(ranked))           # correction factor for tied scores
    if T == 0:
        # call-form raise works under both Python 2 and 3 (was 'raise E, msg')
        raise ValueError('All numbers are identical in lmannwhitneyu')
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)              # normal approximation for prob calc
    return smallu, 1.0 - zprob(z)
def ltiecorrect(rankvals):
    """
    Corrects for ties in Mann Whitney U and Kruskal Wallis H tests.  See
    Siegel, S. (1956) Nonparametric Statistics for the Behavioral Sciences.
    New York: McGraw-Hill.  Code adapted from |Stat rankind.c code.

    Usage:   ltiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    svec, posn = shellsort(rankvals)   # renamed: no longer shadows builtin sorted
    n = len(svec)
    T = 0.0
    i = 0
    while i < n-1:
        if svec[i] == svec[i+1]:
            # measure the length of this run of tied values
            nties = 1
            while i < n-1 and svec[i] == svec[i+1]:
                nties = nties + 1
                i = i + 1
            T = T + nties**3 - nties
        i = i + 1
    T = T / float(n**3-n)
    return 1.0 - T
def lranksums(x,y):
    """
    Calculates the rank sums statistic on the provided scores and
    returns the result.  Use only when the n in each condition is > 20 and you
    have 2 independent samples of ranks.

    Usage:   lranksums(x,y)
    Returns: a z-statistic, two-tailed p-value
    """
    n1 = len(x)
    n2 = len(y)
    combined = rankdata(x+y)     # rank the pooled scores
    xranks = combined[:n1]       # first n1 ranks belong to x
    s = sum(xranks)
    expected = n1*(n1+n2+1) / 2.0
    z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
    prob = 2*(1.0 - zprob(abs(z)))
    return z, prob
def lwilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   lwilcoxont(x,y)
    Returns: a t-statistic, two-tail probability estimate
    """
    if len(x) != len(y):          # '!=' replaces Python-2-only '<>'
        raise ValueError('Unequal N in wilcoxont.  Aborting.')
    d = []
    for i in range(len(x)):
        diff = x[i] - y[i]
        if diff != 0:             # zero differences are discarded
            d.append(diff)
    count = len(d)
    # list comprehension (not map): the result must support len() under Py3 too
    absd = [abs(v) for v in d]
    absranked = rankdata(absd)
    r_plus = 0.0
    r_minus = 0.0
    for i in range(len(absd)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)     # Wilcoxon T = smaller of the signed rank sums
    mn = count * (count+1) * 0.25
    se = math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    z = math.fabs(wt-mn) / se
    prob = 2*(1.0 -zprob(abs(z)))
    return wt, prob
def lkruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H-test for 3 or more independent samples
    and returns the result.

    Usage:   lkruskalwallish(*args)
    Returns: H-statistic (corrected for ties), associated p-value
    """
    args = list(args)
    # list comprehension (not map): must stay subscriptable under Python 3
    n = [len(group) for group in args]
    all = []
    for i in range(len(args)):
        all = all + args[i]
    ranked = rankdata(all)
    T = tiecorrect(ranked)
    for i in range(len(args)):
        # peel each group's ranks off the front of the pooled ranking
        args[i] = ranked[0:n[i]]
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:
        # call-form raise works under both Python 2 and 3 (was 'raise E, msg')
        raise ValueError('All numbers are identical in lkruskalwallish')
    h = h / float(T)
    return h, chisqprob(h,df)
def lfriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for repeated
    measures and returns the result, along with the associated probability
    value.  It assumes 3 or more repeated measures.  Only 3 levels requires a
    minimum of 10 subjects in the study.  Four levels requires 5 subjects per
    level(??).

    Usage:   lfriedmanchisquare(*args)
    Returns: chi-square statistic, associated p-value
    """
    k = len(args)
    if k < 3:
        # call-form raise works under both Python 2 and 3 (was 'raise E, msg')
        raise ValueError('Less than 3 levels.  Friedman test not appropriate.')
    n = len(args[0])
    data = pstat.abut(*args)     # star-call: 'apply()' was removed in Python 3
    for i in range(len(data)):
        data[i] = rankdata(data[i])
    ssbn = 0
    # NOTE(review): this sums the *raw* scores in args, not the ranks computed
    # just above -- looks suspicious but is preserved as-is; verify vs. |Stat.
    for i in range(k):
        ssbn = ssbn + sum(args[i])**2
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, chisqprob(chisq,k-1)
####################################
#### PROBABILITY CALCULATIONS ####
####################################
def lchisqprob(chisq,df):
    """
    Returns the (1-tailed) probability value associated with the provided
    chi-square value and df.  Adapted from chisq.c in Gary Perlman's |Stat.

    Usage:   lchisqprob(chisq,df)
    """
    BIG = 20.0

    def ex(x):
        # exp() that underflows to 0.0 rather than raising for very negative x
        if x < -BIG:
            return 0.0
        return math.exp(x)

    if chisq <= 0 or df < 1:
        return 1.0                   # degenerate inputs: no significance
    half = 0.5 * chisq
    even = (df % 2 == 0)
    if df > 1:
        y0 = ex(-half)
    if even:
        prob = y0
    else:
        prob = 2.0 * zprob(-math.sqrt(chisq))
    if df > 2:
        bound = 0.5 * (df - 1.0)     # series runs while k <= bound
        if even:
            k = 1.0
        else:
            k = 0.5
        if half > BIG:
            # large chi-square: accumulate the series in log space
            if even:
                logterm = 0.0
            else:
                logterm = math.log(math.sqrt(math.pi))
            logc = math.log(half)
            while k <= bound:
                logterm = math.log(k) + logterm
                prob = prob + ex(logc*k - half - logterm)
                k = k + 1.0
            return prob
        else:
            if even:
                term = 1.0
            else:
                term = 1.0 / math.sqrt(math.pi) / math.sqrt(half)
            accum = 0.0
            while k <= bound:
                term = term * (half/float(k))
                accum = accum + term
                k = k + 1.0
            return (accum*y0 + prob)
    else:
        return prob
def lerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipies.

    Usage:   lerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    # Horner evaluation of the Chebyshev-style polynomial in t; the
    # coefficient order reproduces the original nested expression exactly.
    poly = 0.17087277
    for coef in (-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                 -0.18628806, 0.09678418, 0.37409196, 1.00002368):
        poly = poly*t + coef
    ans = t * math.exp(-z*z - 1.26551223 + t*poly)
    if x >= 0:
        return ans
    return 2.0 - ans     # erfc(-x) = 2 - erfc(x)
def lzprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.

    Usage:   lzprob(z)
    """
    Z_MAX = 6.0    # maximum meaningful z-value

    def poly(coeffs, t):
        # Horner evaluation with the same operation order as the original
        # nested expression, so the result is bit-for-bit identical.
        p = coeffs[0]
        for c in coeffs[1:]:
            p = p*t + c
        return p

    if z == 0.0:
        x = 0.0
    else:
        y = 0.5 * math.fabs(z)
        if y >= (Z_MAX*0.5):
            x = 1.0
        elif y < 1.0:
            w = y*y
            x = poly((0.000124818987, -0.001075204047, 0.005198775019,
                      -0.019198292004, 0.059054035642, -0.151968751364,
                      0.319152932694, -0.531923007300, 0.797884560593),
                     w) * y * 2.0
        else:
            y = y - 2.0
            x = poly((-0.000045255659, 0.000152529290, -0.000019538132,
                      -0.000676904986, 0.001390604284, -0.000794620820,
                      -0.002034254874, 0.006549791214, -0.010557625006,
                      0.011630447319, -0.009279453341, 0.005353579108,
                      -0.002141268741, 0.000535310849, 0.999936657524),
                     y)
    if z > 0.0:
        prob = (x+1.0)*0.5
    else:
        prob = (1.0-x)*0.5
    return prob
def lksprob(alam):
    """
    Computes a Kolmolgorov-Smirnov t-test significance level.  Adapted from
    Numerical Recipies.

    Usage:   lksprob(alam)
    """
    sign = 2.0            # alternating 2*(-1)**(j-1) factor of the series
    total = 0.0           # renamed from 'sum': no longer shadows the builtin
    prevterm = 0.0
    a2 = -2.0*alam*alam
    for j in range(1,201):
        term = sign*math.exp(a2*j*j)
        total = total + term
        # stop once the terms have become negligibly small
        if math.fabs(term) <= (0.001*prevterm) or math.fabs(term) < (1.0e-8*total):
            return total
        sign = -sign
        prevterm = math.fabs(term)
    return 1.0             # Get here only if fails to converge; was 0.0!!
def lfprob (dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F
    statistic given the degrees of freedom for the numerator (dfR-dfF) and
    the degrees of freedom for the denominator (dfF).

    Usage:   lfprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    # The F-tail probability is a single incomplete-beta evaluation.
    return betai(0.5*dfden, 0.5*dfnum, dfden/float(dfden+dfnum*F))
def lbetacf(a,b,x):
    """
    This function evaluates the continued fraction form of the incomplete
    Beta function, betai.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetacf(a,b,x)
    Returns: continued-fraction value, or None (after printing a warning)
             if it fails to converge within ITMAX iterations
    """
    ITMAX = 200
    EPS = 3.0e-7

    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        em = float(i+1)
        tem = em + em
        # even step of the continued fraction
        d = em*(b-em)*x/((qam+tem)*(a+tem))
        ap = az + d*am
        bp = bz+d*bm
        # odd step of the continued fraction
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))
        app = ap+d*az
        bpp = bp+d*bz
        aold = az
        # renormalize so the denominators stay near 1
        am = ap/bpp
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        if (abs(az-aold)<(EPS*abs(az))):
            return az
    # call form of print works under Python 2 and 3 (was a Py2 print statement)
    print('a or b too big, or ITMAX too small in Betacf.')
def lgammln(xx):
    """
    Returns the gamma function of xx.
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    (Adapted from: Numerical Recipies in C.)

    Usage:   lgammln(xx)
    """
    # Lanczos-style series coefficients
    coeff = [76.18009173, -86.50532033, 24.01409822, -1.231739516,
             0.120858003e-2, -0.536382e-5]
    x = xx - 1.0
    tmp = x + 5.5
    tmp = tmp - (x+0.5)*math.log(tmp)
    ser = 1.0
    for c in coeff:          # iterate coefficients directly instead of by index
        x = x + 1
        ser = ser + c/x
    return -tmp + math.log(2.50662827465*ser)
def lbetai(a,b,x):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented here,
    using the betacf function.  (Adapted from: Numerical Recipies in C.)

    Usage:   lbetai(a,b,x)
    """
    if (x<0.0 or x>1.0):
        # call-form raise works under both Python 2 and 3 (was 'raise E, msg')
        raise ValueError('Bad x in lbetai')
    if (x==0.0 or x==1.0):
        bt = 0.0
    else:
        # the prefactor x^a * (1-x)^b / B(a,b), computed via log-gammas
        bt = math.exp(gammln(a+b)-gammln(a)-gammln(b)+a*math.log(x)+b*
                      math.log(1.0-x))
    if (x<(a+1.0)/(a+b+2.0)):
        return bt*betacf(a,b,x)/float(a)
    else:
        # use the symmetry relation for faster convergence
        return 1.0-bt*betacf(b,a,1.0-x)/float(b)
####################################
####### ANOVA CALCULATIONS #######
####################################
def lF_oneway(*lists):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   F_oneway(*lists)    where *lists is any number of lists, one per
                                 treatment group
    Returns: F value, one-tailed p-value
    """
    a = len(lists)           # ANOVA on 'a' groups, each in it's own list
    # (dead assignments of means/vars/ns removed -- they were never used)
    alldata = []
    for i in range(len(lists)):
        alldata = alldata + lists[i]
    alldata = N.array(alldata)
    bign = len(alldata)
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    ssbn = 0
    for group in lists:      # renamed from 'list': don't shadow the builtin
        ssbn = ssbn + asquare_of_sums(N.array(group))/float(len(group))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn        # within-groups SS = total SS - between SS
    dfbn = a-1
    dfwn = bign - a
    msb = ssbn/float(dfbn)
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def lF_value (ER,EF,dfnum,dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR-dfF = degrees of freedom of the numerator
        dfF = degrees of freedom associated with the denominator/Full model

    Usage: lF_value(ER,EF,dfnum,dfden)
    """
    msnum = (ER-EF)/float(dfnum)   # mean square for the error reduction
    msden = EF/float(dfden)        # mean square for the full model
    return msnum / msden
####################################
######## SUPPORT FUNCTIONS #######
####################################
def writecc (listoflists,file,writetype='w',extra=2):
    """
    Writes a list of lists to a file in columns, customized by the max
    size of items within the columns (max size of items in col, +2 characters)
    to specified file.  File-overwrite is the default.

    Usage:   writecc (listoflists,file,writetype='w',extra=2)
    Returns: None
    """
    # isinstance works under both Python 2 and 3; types.ListType is Py2-only
    if not isinstance(listoflists[0], (list, tuple)):
        listoflists = [listoflists]
    outfile = open(file,writetype)
    rowstokill = []
    list2print = copy.deepcopy(listoflists)
    for i in range(len(listoflists)):
        if listoflists[i] == ['\n'] or listoflists[i]=='\n' or listoflists[i]=='dashes':
            rowstokill = rowstokill + [i]
    rowstokill.reverse()          # delete from the end so indices stay valid
    for row in rowstokill:
        del list2print[row]
    # column width = widest stringified item in the column, plus 'extra' padding
    maxsize = [0]*len(list2print[0])
    for col in range(len(list2print[0])):
        items = pstat.colex(list2print,col)
        items = map(pstat.makestr,items)
        maxsize[col] = max(map(len,items)) + extra
    for row in listoflists:
        if row == ['\n'] or row == '\n':
            outfile.write('\n')
        elif row == ['dashes'] or row == 'dashes':
            dashes = [0]*len(maxsize)
            for j in range(len(maxsize)):
                dashes[j] = '-'*(maxsize[j]-2)
            outfile.write(pstat.lineincustcols(dashes,maxsize))
        else:
            outfile.write(pstat.lineincustcols(row,maxsize))
        outfile.write('\n')
    outfile.close()
    return None
def lincr(l,cap):   # to increment a list up to a max-list of 'cap'
    """
    Simulate a counting system from an n-dimensional list.

    Usage:   lincr(l,cap)   l=list to increment, cap=max values for each list pos'n
    Returns: next set of values for list l, OR -1 (if overflow)
    """
    l[0] = l[0] + 1     # e.g., [0,0,0] --> [2,4,3] (=cap)
    for pos in range(len(l)):
        if l[pos] <= cap[pos]:      # no carry needed at this position
            continue
        if pos < len(l)-1:          # carry into the next position
            l[pos] = 0
            l[pos+1] = l[pos+1] + 1
        else:                       # carry past the last position: overflow
            l = -1
    return l
def lsum (inlist):
    """
    Returns the sum of the items in the passed list.

    Usage:   lsum(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value
    return total
def lcumsum (inlist):
    """
    Returns a list consisting of the cumulative sum of the items in the
    passed list.

    Usage:   lcumsum(inlist)
    """
    running = copy.deepcopy(inlist)   # deep copy: never mutate the caller's list
    for idx in range(1,len(running)):
        running[idx] = running[idx] + running[idx-1]
    return running
def lss(inlist):
    """
    Squares each value in the passed list, adds up these squares and
    returns the result.

    Usage:   lss(inlist)
    """
    total = 0
    for value in inlist:
        total = total + value**2
    return total
def lsummult (list1,list2):
    """
    Multiplies elements in list1 and list2, element by element, and
    returns the sum of all resulting multiplications.  Must provide equal
    length lists.

    Usage:   lsummult(list1,list2)
    """
    if len(list1) != len(list2):   # '!=' replaces Python-2-only '<>'
        raise ValueError("Lists not equal length in summult.")
    s = 0
    # zip replaces pstat.abut: lengths are checked equal above, so the
    # pairing is identical and the pstat dependency is unnecessary here
    for item1, item2 in zip(list1, list2):
        s = s + item1*item2
    return s
def lsumdiffsquared(x,y):
    """
    Takes pairwise differences of the values in lists x and y, squares
    these differences, and returns the sum of these squares.

    Usage:   lsumdiffsquared(x,y)
    Returns: sum[(x[i]-y[i])**2]
    """
    total = 0
    for idx in range(len(x)):
        delta = x[idx] - y[idx]
        total = total + delta**2
    return total
def lsquare_of_sums(inlist):
    """
    Adds the values in the passed list, squares the sum, and returns
    the result.

    Usage:   lsquare_of_sums(inlist)
    Returns: sum(inlist[i])**2
    """
    total = sum(inlist)
    return float(total)*total
def lshellsort(inlist):
    """
    Shellsort algorithm.  Sorts a 1D-list.

    Usage:   lshellsort(inlist)
    Returns: sorted-inlist, sorting-index-vector (for original list)
    """
    n = len(inlist)
    svec = copy.deepcopy(inlist)
    ivec = list(range(n))   # list(): Py3's range object forbids item assignment
    gap = n // 2            # '//' keeps integer division under both Py2 and Py3
    while gap > 0:
        for i in range(gap,n):
            for j in range(i-gap,-1,-gap):
                while j>=0 and svec[j]>svec[j+gap]:
                    # swap values and, in parallel, their original indices
                    temp        = svec[j]
                    svec[j]     = svec[j+gap]
                    svec[j+gap] = temp
                    itemp       = ivec[j]
                    ivec[j]     = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap // 2
    # svec is now sorted inlist, and ivec has the order svec[i] = inlist[ivec[i]]
    return svec, ivec
def lrankdata(inlist):
    """
    Ranks the data in inlist, dealing with ties appropritely.  Assumes
    a 1D inlist.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   lrankdata(inlist)
    Returns: a list of length equal to inlist, containing rank scores
    """
    n = len(inlist)
    svec, ivec = shellsort(inlist)
    sumranks = 0
    dupcount = 0
    newlist = [0]*n
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        if i==n-1 or svec[i] != svec[i+1]:   # '!=' replaces Py2-only '<>'
            # end of a run of tied values: give each the average rank (1-based)
            averank = sumranks / float(dupcount) + 1
            for j in range(i-dupcount+1,i+1):
                newlist[ivec[j]] = averank
            sumranks = 0
            dupcount = 0
    return newlist
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
    """
    Prints or write to a file stats for two groups, using the name, n,
    mean, sterr, min and max for each group, as well as the statistic name,
    its value, and the associated p-value.

    Usage:   outputpairedstats(fname,writemode,
                               name1,n1,mean1,stderr1,min1,max1,
                               name2,n2,mean2,stderr2,min2,max2,
                               statname,stat,prob)
    Returns: None
    """
    suffix = ''                       # for *s after the p-value
    # if prob arrived as a (0-d or 1-element) array, unwrap the scalar
    try:
        x = prob.shape
        prob = prob[0]
    except:
        pass
    # conventional significance 'stars' appended after the printed p-value
    if prob < 0.001: suffix = ' ***'
    elif prob < 0.01: suffix = ' **'
    elif prob < 0.05: suffix = ' *'
    title = [['Name','N','Mean','SD','Min','Max']]
    # NOTE(review): se1/se2 are passed through math.sqrt before printing under
    # the 'SD' column, so callers appear to supply variances despite the
    # 'stderr' naming in the docstring -- confirm against the callers.
    lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
                  [name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
    # empty/non-string fname means: print the report to stdout
    if type(fname)<>StringType or len(fname)==0:
        print
        print statname
        print
        pstat.printcc(lofl)
        print
        # unwrap 0-d array statistics before rounding
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        print 'Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix
        print
    else:
        # otherwise append (or overwrite, per writemode) the same report to fname
        file = open(fname,writemode)
        file.write('\n'+statname+'\n\n')
        file.close()
        writecc(lofl,fname,'a')
        file = open(fname,'a')
        try:
            if stat.shape == ():
                stat = stat[0]
            if prob.shape == ():
                prob = prob[0]
        except:
            pass
        file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
        file.close()
    return None
def lfindwithin (data):
    """
    Returns an integer representing a binary vector, where 1=within-
    subject factor, 0=between.  Input equals the entire data 2D list (i.e.,
    column 0=random factor, column -1=measured values (those two are skipped).
    Note: input data is in |Stat format ... a list of lists ("2D list") with
    one row per measured value, first column=subject identifier, last column=
    score, one in-between column per factor (these columns contain level
    designations on each factor).  See also stats.anova.__doc__.

    Usage:   lfindwithin(data)     data in |Stat format
    """
    numfact = len(data[0])-1
    withinvec = 0
    # hoisted out of the loop: the full subject list is the same for every factor
    allsubjs = pstat.unique(pstat.colex(data,0))
    for col in range(1,numfact):
        examplelevel = pstat.unique(pstat.colex(data,col))[0]
        rows = pstat.linexand(data,col,examplelevel)  # get 1 level of this factor
        factsubjs = pstat.unique(pstat.colex(rows,0))
        if len(factsubjs) == len(allsubjs):  # every subject appears at this level?
            withinvec = withinvec + (1 << col)
    return withinvec
#########################################################
#########################################################
####### DISPATCH LISTS AND TUPLES TO ABOVE FCNS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
# Bind the generic public names to Dispatch objects that route list/tuple
# arguments to the l*-prefixed implementations defined above.  The a*-prefixed
# array versions are attached to these same names in the numpy section below.
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)), )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)), )
mean = Dispatch ( (lmean, (ListType, TupleType)), )
median = Dispatch ( (lmedian, (ListType, TupleType)), )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)), )
mode = Dispatch ( (lmode, (ListType, TupleType)), )
## MOMENTS:
moment = Dispatch ( (lmoment, (ListType, TupleType)), )
variation = Dispatch ( (lvariation, (ListType, TupleType)), )
skew = Dispatch ( (lskew, (ListType, TupleType)), )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)), )
describe = Dispatch ( (ldescribe, (ListType, TupleType)), )
## FREQUENCY STATISTICS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)), )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)), )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)), )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)), )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)), )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)), )
## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)), )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)), )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)), )
var = Dispatch ( (lvar, (ListType, TupleType)), )
stdev = Dispatch ( (lstdev, (ListType, TupleType)), )
sterr = Dispatch ( (lsterr, (ListType, TupleType)), )
sem = Dispatch ( (lsem, (ListType, TupleType)), )
z = Dispatch ( (lz, (ListType, TupleType)), )
zs = Dispatch ( (lzs, (ListType, TupleType)), )
## TRIMMING FCNS:
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)), )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)), )
## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)), )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)), )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)), )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)), )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)), )
linregress = Dispatch ( (llinregress, (ListType, TupleType)), )
## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)), )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)), )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)), )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)), )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)), )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)), )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)), )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)), )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)), )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)), )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)), )
## PROBABILITY CALCS:
# note: these dispatch on scalar types (int/float), not on sequences
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)), )
zprob = Dispatch ( (lzprob, (IntType, FloatType)), )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)), )
fprob = Dispatch ( (lfprob, (IntType, FloatType)), )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)), )
betai = Dispatch ( (lbetai, (IntType, FloatType)), )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)), )
gammln = Dispatch ( (lgammln, (IntType, FloatType)), )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)), )
F_value = Dispatch ( (lF_value, (ListType, TupleType)), )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType)), )
sum = Dispatch ( (lsum, (ListType, TupleType)), )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)), )
ss = Dispatch ( (lss, (ListType, TupleType)), )
summult = Dispatch ( (lsummult, (ListType, TupleType)), )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)), )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)), )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)), )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)), )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)), )
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
#============= THE ARRAY-VERSION OF THE STATS FUNCTIONS ===============
try: # DEFINE THESE *ONLY* IF NUMERIC IS AVAILABLE
import numpy as N
import numpy.linalg as LA
#####################################
######## ACENTRAL TENDENCY ########
#####################################
def ageometricmean (inarray,dimension=None,keepdims=0):
"""
Calculates the geometric mean of the values in the passed array.
That is: n-th root of (x1 * x2 * ... * xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: ageometricmean(inarray,dimension=None,keepdims=0)
Returns: geometric mean computed over dim(s) listed in dimension
"""
inarray = N.array(inarray,N.float_)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult)
elif type(dimension) in [IntType,FloatType]:
size = inarray.shape[dimension]
mult = N.power(inarray,1.0/size)
mult = N.multiply.reduce(mult,dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
size = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
mult = N.power(inarray,1.0/size)
for dim in dims:
mult = N.multiply.reduce(mult,dim)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
mult = N.reshape(mult,shp)
return mult
def aharmonicmean (inarray,dimension=None,keepdims=0):
"""
Calculates the harmonic mean of the values in the passed array.
That is: n / (1/x1 + 1/x2 + ... + 1/xn). Defaults to ALL values in
the passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: aharmonicmean(inarray,dimension=None,keepdims=0)
Returns: harmonic mean computed over dim(s) in dimension
"""
inarray = inarray.astype(N.float_)
if dimension == None:
inarray = N.ravel(inarray)
size = len(inarray)
s = N.add.reduce(1.0 / inarray)
elif type(dimension) in [IntType,FloatType]:
size = float(inarray.shape[dimension])
s = N.add.reduce(1.0/inarray, dimension)
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
s = N.reshape(s,shp)
else: # must be a SEQUENCE of dims to average over
dims = list(dimension)
dims.sort()
nondims = []
for i in range(len(inarray.shape)):
if i not in dims:
nondims.append(i)
tinarray = N.transpose(inarray,nondims+dims) # put keep-dims first
idx = [0] *len(nondims)
if idx == []:
size = len(N.ravel(inarray))
s = asum(1.0 / inarray)
if keepdims == 1:
s = N.reshape([s],N.ones(len(inarray.shape)))
else:
idx[0] = -1
loopcap = N.array(tinarray.shape[0:len(nondims)]) -1
s = N.zeros(loopcap+1,N.float_)
while incr(idx,loopcap) <> -1:
s[idx] = asum(1.0/tinarray[idx])
size = N.multiply.reduce(N.take(inarray.shape,dims))
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
s = N.reshape(s,shp)
return size / s
def amean (inarray,dimension=None,keepdims=0):
"""
Calculates the arithmatic mean of the values in the passed array.
That is: 1/n * (x1 + x2 + ... + xn). Defaults to ALL values in the
passed array. Use dimension=None to flatten array first. REMEMBER: if
dimension=0, it collapses over dimension 0 ('rows' in a 2D array) only, and
if dimension is a sequence, it collapses over all specified dimensions. If
keepdims is set to 1, the resulting array will have as many dimensions as
inarray, with only 1 'level' per dim that was collapsed over.
Usage: amean(inarray,dimension=None,keepdims=0)
Returns: arithematic mean calculated over dim(s) in dimension
"""
if inarray.dtype in [N.int_, N.short,N.ubyte]:
inarray = inarray.astype(N.float_)
if dimension == None:
inarray = N.ravel(inarray)
sum = N.add.reduce(inarray)
denom = float(len(inarray))
elif type(dimension) in [IntType,FloatType]:
sum = asum(inarray,dimension)
denom = float(inarray.shape[dimension])
if keepdims == 1:
shp = list(inarray.shape)
shp[dimension] = 1
sum = N.reshape(sum,shp)
else: # must be a TUPLE of dims to average over
dims = list(dimension)
dims.sort()
dims.reverse()
sum = inarray *1.0
for dim in dims:
sum = N.add.reduce(sum,dim)
denom = N.array(N.multiply.reduce(N.take(inarray.shape,dims)),N.float_)
if keepdims == 1:
shp = list(inarray.shape)
for dim in dims:
shp[dim] = 1
sum = N.reshape(sum,shp)
return sum/denom
def amedian (inarray,numbins=1000):
"""
Calculates the COMPUTED median value of an array of numbers, given the
number of bins to use for the histogram (more bins approaches finding the
precise median value of the array; default number of bins = 1000). From
G.W. Heiman's Basic Stats, or CRC Probability & Statistics.
NOTE: THIS ROUTINE ALWAYS uses the entire passed array (flattens it first).
Usage: amedian(inarray,numbins=1000)
Returns: median calculated over ALL values in inarray
"""
inarray = N.ravel(inarray)
(hist, smallest, binsize, extras) = ahistogram(inarray,numbins,[min(inarray),max(inarray)])
cumhist = N.cumsum(hist) # make cumulative histogram
otherbins = N.greater_equal(cumhist,len(inarray)/2.0)
otherbins = list(otherbins) # list of 0/1s, 1s start at median bin
cfbin = otherbins.index(1) # get 1st(!) index holding 50%ile score
LRL = smallest + binsize*cfbin # get lower read limit of that bin
cfbelow = N.add.reduce(hist[0:cfbin]) # cum. freq. below bin
freq = hist[cfbin] # frequency IN the 50%ile bin
median = LRL + ((len(inarray)/2.0-cfbelow)/float(freq))*binsize # MEDIAN
return median
def amedianscore (inarray,dimension=None):
"""
Returns the 'middle' score of the passed array. If there is an even
number of scores, the mean of the 2 middle scores is returned. Can function
with 1D arrays, or on the FIRST dimension of 2D arrays (i.e., dimension can
be None, to pre-flatten the array, or else dimension must equal 0).
Usage: amedianscore(inarray,dimension=None)
Returns: 'middle' score of the array, or the mean of the 2 middle scores
"""
if dimension == None:
inarray = N.ravel(inarray)
dimension = 0
inarray = N.sort(inarray,dimension)
if inarray.shape[dimension] % 2 == 0: # if even number of elements
indx = inarray.shape[dimension]/2 # integer division correct
median = N.asarray(inarray[indx]+inarray[indx-1]) / 2.0
else:
indx = inarray.shape[dimension] / 2 # integer division correct
median = N.take(inarray,[indx],dimension)
if median.shape == (1,):
median = median[0]
return median
def amode(a, dimension=None):
"""
Returns an array of the modal (most common) score in the passed array.
If there is more than one such score, ONLY THE FIRST is returned.
The bin-count for the modal values is also returned. Operates on whole
array (dimension=None), or on a given dimension.
Usage: amode(a, dimension=None)
Returns: array of bin-counts for mode(s), array of corresponding modal values
"""
if dimension == None:
a = N.ravel(a)
dimension = 0
scores = pstat.aunique(N.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[dimension] = 1
oldmostfreq = N.zeros(testshape)
oldcounts = N.zeros(testshape)
for score in scores:
template = N.equal(a,score)
counts = asum(template,dimension,1)
mostfrequent = N.where(counts>oldcounts,score,oldmostfreq)
oldcounts = N.where(counts>oldcounts,counts,oldcounts)
oldmostfreq = mostfrequent
return oldcounts, mostfrequent
def atmean(a,limits=None,inclusive=(1,1)):
"""
Returns the arithmetic mean of all values in an array, ignoring values
strictly outside the sequence passed to 'limits'. Note: either limit
in the sequence, or the value of limits itself, can be set to None. The
inclusive list/tuple determines whether the lower and upper limiting bounds
(respectively) are open/exclusive (0) or closed/inclusive (1).
Usage: atmean(a,limits=None,inclusive=(1,1))
"""
if a.dtype in [N.int_, N.short,N.ubyte]:
a = a.astype(N.float_)
if limits == None:
return mean(a)
assert type(limits) in [ListType,TupleType,N.ndarray], "Wrong type for limits in atmean"
if inclusive[0]: lowerfcn = N.greater_equal
else: lowerfcn = N.greater
if inclusive[1]: upperfcn = N.less_equal
else: upperfcn = N.less
if limits[0] > N.maximum.reduce(N.ravel(a)) or limits[1] < N.minimum.reduce(N.ravel(a)):
raise ValueError, "No array values within given limits (atmean)."
elif limits[0]==None and limits[1]<>None:
mask = upperfcn(a,limits[1])
elif limits[0]<>None and limits[1]==None:
mask = lowerfcn(a,limits[0])
elif limits[0]<>None and limits[1]<>None:
mask = lowerfcn(a,limits[0])*upperfcn(a,limits[1])
s = float(N.add.reduce(N.ravel(a*mask)))
n = float(N.add.reduce(N.ravel(mask)))
return s/n
def atvar(a,limits=None,inclusive=(1,1)):
    """
    Returns the sample variance of values in an array, (i.e., using N-1),
    ignoring values strictly outside the sequence passed to 'limits'.
    Note: either limit in the sequence, or the value of limits itself,
    can be set to None.  The inclusive list/tuple determines whether the lower
    and upper limiting bounds (respectively) are open/exclusive (0) or
    closed/inclusive (1). ASSUMES A FLAT ARRAY (OR ELSE PREFLATTENS).

    Usage:   atvar(a,limits=None,inclusive=(1,1))
    Raises:  ValueError if no array values fall within the given limits
    """
    a = a.astype(N.float_)
    if limits is None:
        return avar(a)
    assert isinstance(limits, (list, tuple, N.ndarray)), "Wrong type for limits in atvar"
    if limits[0] is None and limits[1] is None:   # degenerate limits
        return avar(a)
    if inclusive[0]:    lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:    upperfcn = N.less_equal
    else:               upperfcn = N.less
    # Guard each comparison: a one-sided limit of None used to be compared
    # directly against the array extremes and could raise spuriously.
    if (limits[0] is not None and limits[0] > N.maximum.reduce(N.ravel(a))) or \
       (limits[1] is not None and limits[1] < N.minimum.reduce(N.ravel(a))):
        raise ValueError("No array values within given limits (atvar).")
    if limits[0] is None:
        mask = upperfcn(a, limits[1])
    elif limits[1] is None:
        mask = lowerfcn(a, limits[0])
    else:
        mask = lowerfcn(a, limits[0]) * upperfcn(a, limits[1])
    a = N.compress(mask, a)   # squish out excluded values
    return avar(a)
def atmin(a,lowerlimit=None,dimension=None,inclusive=1):
    """
    Returns the minimum value of a, along dimension, including only values
    greater than (or equal to, if inclusive=1) lowerlimit.  If the limit is
    set to None, all values in the array are used.

    Usage:   atmin(a,lowerlimit=None,dimension=None,inclusive=1)
    """
    # inclusive=1 means the limit value itself is eligible, so the filter
    # must be >= (the two comparison functions were previously swapped;
    # cf. scipy.stats.tmin semantics).
    if inclusive:       lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    if lowerlimit is None:
        # a limit below the global minimum, so nothing is excluded
        lowerlimit = N.minimum.reduce(N.ravel(a))-11
    biggest = N.maximum.reduce(N.ravel(a))
    # replace excluded cells with the global max so they never win the min
    ta = N.where(lowerfcn(a,lowerlimit),a,biggest)
    return N.minimum.reduce(ta,dimension)
def atmax(a,upperlimit,dimension=None,inclusive=1):
    """
    Returns the maximum value of a, along dimension, including only values
    less than (or equal to, if inclusive=1) upperlimit.  If the limit is set
    to None, a limit larger than the max value in the array is used.

    Usage:   atmax(a,upperlimit,dimension=None,inclusive=1)
    """
    # inclusive=1 means the limit value itself is eligible, so the filter
    # must be <= (the two comparison functions were previously swapped;
    # cf. scipy.stats.tmax semantics).
    if inclusive:       upperfcn = N.less_equal
    else:               upperfcn = N.less
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    if upperlimit is None:
        # a limit above the global maximum, so nothing is excluded
        upperlimit = N.maximum.reduce(N.ravel(a))+1
    smallest = N.minimum.reduce(N.ravel(a))
    # replace excluded cells with the global min so they never win the max
    ta = N.where(upperfcn(a,upperlimit),a,smallest)
    return N.maximum.reduce(ta,dimension)
def atstdev(a,limits=None,inclusive=(1,1)):
    """
    Returns the standard deviation of all values in an array, ignoring values
    strictly outside the sequence passed to 'limits'.  Note: either limit
    in the sequence, or the value of limits itself, can be set to None.  The
    inclusive list/tuple determines whether the lower and upper limiting bounds
    (respectively) are open/exclusive (0) or closed/inclusive (1).

    Usage:   atstdev(a,limits=None,inclusive=(1,1))
    """
    # Call the array version atvar() directly rather than the tvar
    # list/array dispatch wrapper (consistent with atvar calling avar).
    return N.sqrt(atvar(a,limits,inclusive))
def atsem(a,limits=None,inclusive=(1,1)):
    """
    Returns the standard error of the mean for the values in an array,
    (i.e., using N for the denominator), ignoring values strictly outside
    the sequence passed to 'limits'.  Note: either limit in the sequence,
    or the value of limits itself, can be set to None.  The inclusive list/tuple
    determines whether the lower and upper limiting bounds (respectively) are
    open/exclusive (0) or closed/inclusive (1).

    Usage:   atsem(a,limits=None,inclusive=(1,1))
    Raises:  ValueError if no array values fall within the given limits
    """
    # atstdev (array version) replaces the tstdev dispatch wrapper.
    sd = atstdev(a,limits,inclusive)
    if limits is None:
        return sd / math.sqrt(len(N.ravel(a)))
    assert isinstance(limits, (list, tuple, N.ndarray)), "Wrong type for limits in atsem"
    if limits[0] is None and limits[1] is None:   # no trimming: all values count
        return sd / math.sqrt(len(N.ravel(a)))
    if inclusive[0]:    lowerfcn = N.greater_equal
    else:               lowerfcn = N.greater
    if inclusive[1]:    upperfcn = N.less_equal
    else:               upperfcn = N.less
    # Guard each comparison: a one-sided limit of None used to be compared
    # directly against the array extremes and could raise spuriously.
    if (limits[0] is not None and limits[0] > N.maximum.reduce(N.ravel(a))) or \
       (limits[1] is not None and limits[1] < N.minimum.reduce(N.ravel(a))):
        raise ValueError("No array values within given limits (atsem).")
    if limits[0] is None:
        mask = upperfcn(a, limits[1])
    elif limits[1] is None:
        mask = lowerfcn(a, limits[0])
    else:
        mask = lowerfcn(a, limits[0]) * upperfcn(a, limits[1])
    # (a dead sum-of-squares computation, 'term1', was removed here)
    n = float(N.add.reduce(N.ravel(mask)))   # count of in-limit values
    return sd / math.sqrt(n)
#####################################
############ AMOMENTS #############
#####################################
def amoment(a,moment=1,dimension=None):
    """
    Calculates the nth moment about the mean for a sample (defaults to the
    1st moment).  Generally used to calculate coefficients of skewness and
    kurtosis.  Dimension can equal None (ravel array first), an integer
    (the dimension over which to operate), or a sequence (operate over
    multiple dimensions).

    Usage:   amoment(a,moment=1,dimension=None)
    Returns: appropriate moment along given dimension
    """
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    # The first moment about the mean is identically zero.
    if moment == 1:
        return 0.0
    mn = amean(a, dimension, 1)      # keepdims=1 so the subtraction broadcasts
    deviations = a - mn
    return amean(N.power(deviations, moment), dimension)
def avariation(a,dimension=None):
    """
    Returns the coefficient of variation, as defined in CRC Standard
    Probability and Statistics, p.6. Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   avariation(a,dimension=None)
    """
    spread = asamplestdev(a, dimension)
    center = amean(a, dimension)
    # coefficient of variation expressed as a percentage
    return 100.0 * spread / center
def askew(a,dimension=None):
"""
Returns the skewness of a distribution (normal ==> 0.0; >0 means extra
weight in left tail). Use askewtest() to see if it's close enough.
Dimension can equal None (ravel array first), an integer (the
dimension over which to operate), or a sequence (operate over multiple
dimensions).
Usage: askew(a, dimension=None)
Returns: skew of vals in a along dimension, returning ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),1.5)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) <> 0:
print "Number of zeros in askew: ",asum(zero)
denom = denom + zero # prevent divide-by-zero
return N.where(zero, 0, amoment(a,3,dimension)/denom)
def akurtosis(a,dimension=None):
"""
Returns the kurtosis of a distribution (normal ==> 3.0; >3 means
heavier in the tails, and usually more peaked). Use akurtosistest()
to see if it's close enough. Dimension can equal None (ravel array
first), an integer (the dimension over which to operate), or a
sequence (operate over multiple dimensions).
Usage: akurtosis(a,dimension=None)
Returns: kurtosis of values in a along dimension, and ZERO where all vals equal
"""
denom = N.power(amoment(a,2,dimension),2)
zero = N.equal(denom,0)
if type(denom) == N.ndarray and asum(zero) <> 0:
print "Number of zeros in akurtosis: ",asum(zero)
denom = denom + zero # prevent divide-by-zero
return N.where(zero,0,amoment(a,4,dimension)/denom)
def adescribe(inarray,dimension=None):
    """
    Returns several descriptive statistics of the passed array.  Dimension
    can equal None (ravel array first), an integer (the dimension over
    which to operate), or a sequence (operate over multiple dimensions).

    Usage:   adescribe(inarray,dimension=None)
    Returns: n, (min,max), mean, standard deviation, skew, kurtosis
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    n = inarray.shape[dimension]
    extremes = (N.minimum.reduce(inarray), N.maximum.reduce(inarray))
    return (n,
            extremes,
            amean(inarray, dimension),
            astdev(inarray, dimension),
            askew(inarray, dimension),
            akurtosis(inarray, dimension))
#####################################
######## NORMALITY TESTS ##########
#####################################
def askewtest(a,dimension=None):
    """
    Tests whether the skew is significantly different from a normal
    distribution.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).

    Usage:   askewtest(a,dimension=None)
    Returns: z-score and 2-tail z-probability
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    b2 = askew(a,dimension)                 # sample skewness
    n = float(a.shape[dimension])
    y = b2 * N.sqrt(((n+1)*(n+3)) / (6.0*(n-2)) )   # standardized skew
    # Transformation of y to an approximately standard-normal Z
    # (appears to follow D'Agostino's skewness test -- TODO confirm source)
    beta2 = ( 3.0*(n*n+27*n-70)*(n+1)*(n+3) ) / ( (n-2.0)*(n+5)*(n+7)*(n+9) )
    W2 = -1 + N.sqrt(2*(beta2-1))
    delta = 1/N.sqrt(N.log(N.sqrt(W2)))
    alpha = N.sqrt(2/(W2-1))
    y = N.where(y==0,1,y)                   # avoid log(0) in the Z formula below
    Z = delta*N.log(y/alpha + N.sqrt((y/alpha)**2+1))
    # two-tailed probability from the upper-tail normal probability
    return Z, (1.0-zprob(Z))*2
def akurtosistest(a,dimension=None):
    """
    Tests whether a dataset has normal kurtosis (i.e.,
    kurtosis=3(n-1)/(n+1)) Valid only for n>20.  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).

    Usage:   akurtosistest(a,dimension=None)
    Returns: z-score and 2-tail z-probability, returns 0 for bad pixels
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    n = float(a.shape[dimension])
    if n<20:
        print "akurtosistest only valid for n>=20 ... continuing anyway, n=",n
    b2 = akurtosis(a,dimension)             # sample kurtosis
    E = 3.0*(n-1) /(n+1)                    # expected kurtosis under normality
    varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1)*(n+3)*(n+5))   # its variance
    x = (b2-E)/N.sqrt(varb2)                # standardized kurtosis
    # Transformation of x to an approximately standard-normal Z
    # (appears to follow the Anscombe-Glynn approach -- TODO confirm source)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * N.sqrt((6.0*(n+3)*(n+5))/
                                                       (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 *(2.0/sqrtbeta1 + N.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 -2/(9.0*A)
    denom = 1 +x*N.sqrt(2/(A-4.0))
    denom = N.where(N.less(denom,0), 99, denom)   # flag bad cells with sentinel 99
    term2 = N.where(N.equal(denom,0), term1, N.power((1-2.0/A)/denom,1/3.0))
    Z = ( term1 - term2 ) / N.sqrt(2/(9.0*A))
    Z = N.where(N.equal(denom,99), 0, Z)          # zero out the flagged cells
    return Z, (1.0-zprob(Z))*2                    # two-tailed normal probability
def anormaltest(a,dimension=None):
    """
    Tests whether skew and/OR kurtosis of dataset differs from normal
    curve.  Can operate over multiple dimensions.  Dimension can equal
    None (ravel array first), an integer (the dimension over which to
    operate), or a sequence (operate over multiple dimensions).

    Usage:   anormaltest(a,dimension=None)
    Returns: z-score and 2-tail probability
    """
    if dimension is None:
        a = N.ravel(a)
        dimension = 0
    skew_z, ignored = askewtest(a, dimension)
    kurt_z, ignored = akurtosistest(a, dimension)
    # omnibus statistic: sum of the two squared z-scores, chi-square w/ 2 df
    k2 = N.power(skew_z, 2) + N.power(kurt_z, 2)
    return k2, achisqprob(k2, 2)
#####################################
###### AFREQUENCY FUNCTIONS #######
#####################################
def aitemfreq(a):
    """
    Returns a 2D array of item frequencies.  Column 1 contains item values,
    column 2 contains their respective counts.  Assumes a 1D array is passed.
    @@@sorting OK?

    Usage:   aitemfreq(a)
    Returns: a 2D frequency table (col [0:n-1]=scores, col n=frequencies)
    """
    scores = N.sort(pstat.aunique(a))
    counts = N.zeros(len(scores))
    # count occurrences of each unique score
    for idx in range(len(scores)):
        counts[idx] = N.add.reduce(N.equal(a, scores[idx]))
    return N.array(pstat.aabut(scores, counts))
def ascoreatpercentile (inarray, percent):
    """
    Usage:   ascoreatpercentile(inarray,percent)   0<percent<100
    Returns: score at given percentile, relative to inarray distribution
    """
    targetcf = (percent / 100.0) * len(inarray)   # target cumulative count
    counts, lrl, binsize, extras = histogram(inarray)
    cumhist = cumsum(counts*1)
    i = 0
    for i in range(len(cumhist)):
        if cumhist[i] >= targetcf:
            break
    # linear interpolation inside the bin that crosses the target count
    return binsize * ((targetcf - cumhist[i-1]) / float(counts[i])) + (lrl + binsize*i)
def apercentileofscore (inarray,score,histbins=10,defaultlimits=None):
    """
    Note: result of this function depends on the values used to histogram
    the data(!).

    Usage:   apercentileofscore(inarray,score,histbins=10,defaultlimits=None)
    Returns: percentile-position of score (0-100) relative to inarray
    """
    counts, lrl, binsize, extras = histogram(inarray, histbins, defaultlimits)
    cumhist = cumsum(counts*1)
    binindex = int((score - lrl) / float(binsize))
    # cumulative count below this bin, plus linear interpolation inside it
    inbin = ((score - (lrl + binsize*binindex)) / float(binsize)) * counts[binindex]
    return (cumhist[binindex-1] + inbin) / float(len(inarray)) * 100
def ahistogram (inarray,numbins=10,defaultlimits=None,printextras=1):
"""
Returns (i) an array of histogram bin counts, (ii) the smallest value
of the histogram binning, and (iii) the bin width (the last 2 are not
necessarily integers). Default number of bins is 10. Defaultlimits
can be None (the routine picks bins spanning all the numbers in the
inarray) or a 2-sequence (lowerlimit, upperlimit). Returns all of the
following: array of bin values, lowerreallimit, binsize, extrapoints.
Usage: ahistogram(inarray,numbins=10,defaultlimits=None,printextras=1)
Returns: (array of bin counts, bin-minimum, min-width, #-points-outside-range)
"""
inarray = N.ravel(inarray) # flatten any >1D arrays
if (defaultlimits <> None):
lowerreallimit = defaultlimits[0]
upperreallimit = defaultlimits[1]
binsize = (upperreallimit-lowerreallimit) / float(numbins)
else:
Min = N.minimum.reduce(inarray)
Max = N.maximum.reduce(inarray)
estbinwidth = float(Max - Min)/float(numbins) + 1e-6
binsize = (Max-Min+estbinwidth)/float(numbins)
lowerreallimit = Min - binsize/2.0 #lower real limit,1st bin
bins = N.zeros(numbins)
extrapoints = 0
for num in inarray:
try:
if (num-lowerreallimit) < 0:
extrapoints = extrapoints + 1
else:
bintoincrement = int((num-lowerreallimit) / float(binsize))
bins[bintoincrement] = bins[bintoincrement] + 1
except: # point outside lower/upper limits
extrapoints = extrapoints + 1
if (extrapoints > 0 and printextras == 1):
print '\nPoints outside given histogram range =',extrapoints
return (bins, lowerreallimit, binsize, extrapoints)
def acumfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a cumulative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   acumfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerreallimit, binsize, extrapoints = histogram(a, numbins, defaultreallimits)
    # running total of the per-bin counts
    return cumsum(counts*1), lowerreallimit, binsize, extrapoints
def arelfreq(a,numbins=10,defaultreallimits=None):
    """
    Returns a relative frequency histogram, using the histogram function.
    Defaultreallimits can be None (use all data), or a 2-sequence containing
    lower and upper limits on values to include.

    Usage:   arelfreq(a,numbins=10,defaultreallimits=None)
    Returns: array of cumfreq bin values, lowerreallimit, binsize, extrapoints
    """
    counts, lowerreallimit, binsize, extrapoints = histogram(a, numbins, defaultreallimits)
    # per-bin counts as a fraction of the total number of points
    relcounts = N.array(counts / float(a.shape[0]))
    return relcounts, lowerreallimit, binsize, extrapoints
#####################################
###### AVARIABILITY FUNCTIONS #####
#####################################
def aobrientransform(*args):
    """
    Computes a transform on input data (any number of columns).  Used to
    test for homogeneity of variance prior to running one-way stats.  Each
    array in *args is one level of a factor.  If an F_oneway() run on the
    transformed data and found significant, variances are unequal.   From
    Maxwell and Delaney, p.112.

    Usage:   aobrientransform(*args)    *args = 1D arrays, one per level of factor
    Returns: transformed data for use in an ANOVA
    Raises:  ValueError if the transform fails its convergence check
    """
    TINY = 1e-10
    k = len(args)
    n = N.zeros(k,N.float_)
    v = N.zeros(k,N.float_)
    m = N.zeros(k,N.float_)
    nargs = []
    for i in range(k):
        nargs.append(args[i].astype(N.float_))
        n[i] = float(len(nargs[i]))
        v[i] = var(nargs[i])
        m[i] = mean(nargs[i])
    for j in range(k):
        # n[j] lives in a float array; range() requires a true integer
        for i in range(int(n[j])):
            t1 = (n[j]-1.5)*n[j]*(nargs[j][i]-m[j])**2
            t2 = 0.5*v[j]*(n[j]-1.0)
            t3 = (n[j]-1.0)*(n[j]-2.0)
            nargs[j][i] = (t1-t2) / float(t3)
    # sanity check: means of the transformed data reproduce the variances
    check = 1
    for j in range(k):
        if v[j] - mean(nargs[j]) > TINY:
            check = 0
    if check != 1:
        raise ValueError('Lack of convergence in obrientransform.')
    else:
        return N.array(nargs)
def asamplevar (inarray,dimension=None,keepdims=0):
    """
    Returns the sample variance of the values in the passed array
    (i.e., using N in the denominator).  Dimension can equal None
    (ravel array first), an integer (the dimension over which to operate),
    or a sequence (operate over multiple dimensions).  Set keepdims=1 to
    return an array with the same number of dimensions as inarray.

    Usage:   asamplevar(inarray,dimension=None,keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    # keepdims=1 retains the reduced axis so the subtraction broadcasts.
    # This replaces a dimension==1 special case that indexed with the
    # defunct Numeric name N.NewAxis (an AttributeError under numpy);
    # the keepdims form produces the same (n0,1,...) shaped mean.
    mn = amean(inarray,dimension,keepdims=1)
    deviations = inarray - mn
    if type(dimension) == list:
        n = 1
        for d in dimension:
            n = n*inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    svar = ass(deviations,dimension,keepdims) / float(n)
    return svar
def asamplestdev (inarray, dimension=None, keepdims=0):
    """
    Returns the sample standard deviation of the values in the passed
    array (i.e., using N).  Dimension can equal None (ravel array first),
    an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to return an array
    with the same number of dimensions as inarray.

    Usage:   asamplestdev(inarray,dimension=None,keepdims=0)
    """
    # square root of the biased (denominator N) variance
    variance = asamplevar(inarray, dimension, keepdims)
    return N.sqrt(variance)
def asignaltonoise(instack,dimension=0):
    """
    Calculates signal-to-noise.  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).

    Usage:   asignaltonoise(instack,dimension=0):
    Returns: array containing the value of (mean/stdev) along dimension,
             or 0 when stdev=0
    """
    # Call the array versions directly instead of the list/array dispatch
    # wrappers mean()/stdev() (consistent with the rest of the a* family).
    m = amean(instack,dimension)
    sd = astdev(instack,dimension)
    return N.where(sd==0,0,m/sd)
def acov (x,y, dimension=None,keepdims=0):
    """
    Returns the estimated covariance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   acov(x,y,dimension=None,keepdims=0)
    """
    if dimension == None:
        x = N.ravel(x)
        y = N.ravel(y)
        dimension = 0
    xmn = amean(x,dimension,1)  # keepdims
    xdeviations = x - xmn
    ymn = amean(y,dimension,1)  # keepdims
    ydeviations = y - ymn
    if type(dimension) == ListType:
        n = 1
        for d in dimension:
            n = n*x.shape[d]
    else:
        n = x.shape[dimension]
    # NOTE(review): N.sum with no axis argument sums over the WHOLE
    # deviation product, so for multi-D input with dimension != None the
    # result is still a scalar while n is the per-axis count -- verify
    # intended.  The keepdims parameter is accepted but never used here.
    covar = N.sum(xdeviations*ydeviations)/float(n-1)
    return covar
def avar (inarray, dimension=None,keepdims=0):
    """
    Returns the estimated population variance of the values in the passed
    array (i.e., N-1).  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   avar(inarray,dimension=None,keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    # keepdims=1 on the mean so the subtraction broadcasts
    deviations = inarray - amean(inarray, dimension, 1)
    if type(dimension) == list:
        n = 1
        for d in dimension:
            n = n * inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # sum of squared deviations over an N-1 denominator
    return ass(deviations, dimension, keepdims) / float(n-1)
def astdev (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard deviation of the values in
    the passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   astdev(inarray,dimension=None,keepdims=0)
    """
    # square root of the unbiased (denominator N-1) variance
    variance = avar(inarray, dimension, keepdims)
    return N.sqrt(variance)
def asterr (inarray, dimension=None, keepdims=0):
    """
    Returns the estimated population standard error of the values in the
    passed array (i.e., N-1).  Dimension can equal None (ravel array
    first), an integer (the dimension over which to operate), or a
    sequence (operate over multiple dimensions).  Set keepdims=1 to return
    an array with the same number of dimensions as inarray.

    Usage:   asterr(inarray,dimension=None,keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    # N-1 standard deviation divided by sqrt of the count along dimension
    sd = astdev(inarray, dimension, keepdims)
    return sd / float(N.sqrt(inarray.shape[dimension]))
def asem (inarray, dimension=None, keepdims=0):
    """
    Returns the standard error of the mean (i.e., using N) of the values
    in the passed array.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions).  Set keepdims=1 to return an array with the
    same number of dimensions as inarray.

    Usage:   asem(inarray,dimension=None, keepdims=0)
    """
    if dimension is None:
        inarray = N.ravel(inarray)
        dimension = 0
    if type(dimension) == list:
        n = 1
        for d in dimension:
            n = n * inarray.shape[d]
    else:
        n = inarray.shape[dimension]
    # denominator-N stdev scaled by sqrt(n-1), per this module's convention
    return asamplestdev(inarray, dimension, keepdims) / N.sqrt(n-1)
def az (a, score):
    """
    Returns the z-score of a given input score, given the array from which
    that score came.  Not appropriate for population calculations, nor for
    arrays > 1D.

    Usage:   az(a, score)
    """
    # distance from the mean, in units of (denominator-N) stdev
    deviation = score - amean(a)
    return deviation / asamplestdev(a)
def azs (a):
    """
    Returns a 1D array of z-scores, one for each score in the passed array,
    computed relative to the passed array.

    Usage:   azs(a)
    """
    # Call the array z-score function az() directly rather than the z()
    # list/array dispatch wrapper, and build the result in one pass.
    return N.array([az(a, item) for item in a])
def azmap (scores, compare, dimension=0):
    """
    Returns an array of z-scores the shape of scores (e.g., [x,y]), compared to
    array passed to compare (e.g., [time,x,y]).  Assumes collapsing over dim 0
    of the compare array.

    Usage:   azmap(scores, compare, dimension=0)
    """
    mns = amean(compare,dimension)
    # use the same dimension for the spread as for the mean (this was
    # hard-coded to 0, which disagreed with mns whenever dimension != 0)
    sstd = asamplestdev(compare,dimension)
    return (scores - mns) / sstd
#####################################
####### ATRIMMING FUNCTIONS #######
#####################################
## deleted around() as it's in numpy now
def athreshold(a,threshmin=None,threshmax=None,newval=0):
    """
    Like Numeric.clip() except that values <threshmin or >threshmax are
    replaced by newval instead of by threshmin/threshmax (respectively).

    Usage:   athreshold(a,threshmin=None,threshmax=None,newval=0)
    Returns: a, with values <threshmin or >threshmax replaced with newval
    """
    mask = N.zeros(a.shape)
    if threshmin is not None:
        mask = mask + N.where(a<threshmin,1,0)
    if threshmax is not None:
        mask = mask + N.where(a>threshmax,1,0)
    mask = N.clip(mask,0,1)   # collapse doubly-flagged cells to 1
    return N.where(mask,newval,a)
def atrimboth (a,proportiontocut):
    """
    Slices off the passed proportion of items from BOTH ends of the passed
    array (i.e., with proportiontocut=0.1, slices 'leftmost' 10% AND
    'rightmost' 10% of scores.  You must pre-sort the array if you want
    "proper" trimming.  Slices off LESS if proportion results in a
    non-integer slice index (i.e., conservatively slices off
    proportiontocut).

    Usage:   atrimboth (a,proportiontocut)
    Returns: trimmed version of array a
    """
    # int() truncates, so a fractional cut point trims conservatively
    cut = int(proportiontocut * len(a))
    return a[cut:len(a)-cut]
def atrim1 (a,proportiontocut,tail='right'):
    """
    Slices off the passed proportion of items from ONE end of the passed
    array (i.e., if proportiontocut=0.1, slices off 'leftmost' or 'rightmost'
    10% of scores).  Slices off LESS if proportion results in a non-integer
    slice index (i.e., conservatively slices off proportiontocut).

    Usage:   atrim1(a,proportiontocut,tail='right')  or set tail='left'
    Returns: trimmed version of array a
    Raises:  ValueError for a tail other than 'right' or 'left'
    """
    # tail.lower() replaces the long-deprecated string.lower() function
    # (removed in Python 3); behavior for 'right'/'left' is unchanged.
    if tail.lower() == 'right':
        lowercut = 0
        uppercut = len(a) - int(proportiontocut*len(a))
    elif tail.lower() == 'left':
        lowercut = int(proportiontocut*len(a))
        uppercut = len(a)
    else:
        # previously fell through to an UnboundLocalError on lowercut
        raise ValueError("tail must be 'right' or 'left'")
    return a[lowercut:uppercut]
#####################################
##### ACORRELATION FUNCTIONS ######
#####################################
def acovariance(X):
    """
    Computes the covariance matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acovariance(X)
    Returns: covariance matrix of X
    Raises:  TypeError if X is not 2D
    """
    if len(X.shape) != 2:
        raise TypeError("acovariance requires 2D matrices")
    n = X.shape[0]
    mX = amean(X,0)                       # column means
    # E[X'X]/n minus the outer product of the column means
    return N.dot(N.transpose(X),X) / float(n) - N.multiply.outer(mX,mX)
def acorrelation(X):
    """
    Computes the correlation matrix of a matrix X.  Requires a 2D matrix input.

    Usage:   acorrelation(X)
    Returns: correlation matrix of X
    """
    covmat = acovariance(X)
    variances = N.diagonal(covmat)
    # normalize each covariance by the product of the two stdevs
    return covmat / N.sqrt(N.multiply.outer(variances, variances))
def apaired(x,y):
    """
    Interactively determines the type of data in x and y, and then runs the
    appropriated statistic for paired group data.  Prompts on stdin (via
    raw_input) and prints results to stdout; returns None.

    Usage:   apaired(x,y)     x,y = the two arrays of values to be compared
    Returns: appropriate statistic name, value, and probability
    """
    samples = ''
    while samples not in ['i','r','I','R','c','C']:
        print '\nIndependent or related samples, or correlation (i,r,c): ',
        samples = raw_input()

    if samples in ['i','I','r','R']:
        # first decide equal vs unequal variances to pick the test below
        print '\nComparing variances ...',
# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112
        r = obrientransform(x,y)
        f,p = F_oneway(pstat.colex(r,0),pstat.colex(r,1))
        if p<0.05:
            vartype='unequal, p='+str(round(p,4))
        else:
            vartype='equal'
        print vartype
        if samples in ['i','I']:
            # INDEPENDENT SAMPLES: parametric t if variances equal,
            # otherwise a nonparametric test chosen by sample size
            if vartype[0]=='e':
                t,p = ttest_ind(x,y,None,0)
                print '\nIndependent samples t-test:  ', round(t,4),round(p,4)
            else:
                if len(x)>20 or len(y)>20:
                    z,p = ranksums(x,y)
                    print '\nRank Sums test (NONparametric, n>20):  ', round(z,4),round(p,4)
                else:
                    u,p = mannwhitneyu(x,y)
                    print '\nMann-Whitney U-test (NONparametric, ns<20):  ', round(u,4),round(p,4)

        else:  # RELATED SAMPLES
            if vartype[0]=='e':
                t,p = ttest_rel(x,y,0)
                print '\nRelated samples t-test:  ', round(t,4),round(p,4)
            else:
                t,p = ranksums(x,y)
                print '\nWilcoxon T-test (NONparametric):  ', round(t,4),round(p,4)
    else:  # CORRELATION ANALYSIS
        corrtype = ''
        while corrtype not in ['c','C','r','R','d','D']:
            print '\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',
            corrtype = raw_input()
        if corrtype in ['c','C']:
            m,b,r,p,see = linregress(x,y)
            print '\nLinear regression for continuous variables ...'
            lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]
            pstat.printcc(lol)
        elif corrtype in ['r','R']:
            r,p = spearmanr(x,y)
            print '\nCorrelation for ranked variables ...'
            print "Spearman's r: ",round(r,4),round(p,4)
        else: # DICHOTOMOUS
            r,p = pointbiserialr(x,y)
            print '\nAssuming x contains a dichotomous variable ...'
            print 'Point Biserial r: ',round(r,4),round(p,4)
    print '\n\n'
    return None
def dices(x,y):
    """
    Calculates Dice's coefficient ... (2*number of common terms)/(number of
    terms in x + number of terms in y). Returns a value between 0 (orthogonal)
    and 1.

    Usage:  dices(x,y)
    """
    # Built-in set() replaces the deprecated 'sets' module (removed in
    # Python 3); duplicates are still collapsed before counting.
    xset = set(x)
    yset = set(y)
    common = len(xset & yset)
    total = float(len(xset) + len(yset))
    return 2*common/total
def icc(x,y=None,verbose=0):
    """
    Calculates intraclass correlation coefficients using simple, Type I sums
    of squares.  If only one variable is passed, assumed it's an Nx2 matrix

    Usage:   icc(x,y=None,verbose=0)
    Returns: icc rho, prob ####PROB IS A GUESS BASED ON PEARSON
    """
    TINY = 1.0e-20
    # 'if y:' is ambiguous for numpy arrays (truth value of a multi-element
    # array raises); test identity against None instead.  The local is also
    # renamed so it no longer shadows the builtin all().
    if y is not None:
        both = N.concatenate([x,y],0)
    else:
        both = x+0
        x = both[:,0]
        y = both[:,1]
    totalss = ass(both-mean(both))
    pairmeans = (x+y)/2.
    withinss = ass(x-pairmeans) + ass(y-pairmeans)
    withindf = float(len(x))
    betwdf = float(len(x)-1)
    withinms = withinss / withindf
    betweenms = (totalss-withinss) / betwdf
    rho = (betweenms-withinms)/(withinms+betweenms)
    # significance approximated via a Pearson-style t transform
    t = rho*math.sqrt(betwdf/((1.0-rho+TINY)*(1.0+rho+TINY)))
    prob = abetai(0.5*betwdf,0.5,betwdf/(betwdf+t*t),verbose)
    return rho, prob
def alincc(x,y):
    """
    Calculates Lin's concordance correlation coefficient.

    Usage:   alincc(x,y)    where x, y are equal-length arrays
    Returns: Lin's CC
    """
    x = N.ravel(x)
    y = N.ravel(y)
    # rescale the N-1 estimators to population (denominator n) versions
    covar = acov(x,y) * (len(x)-1) / float(len(x))
    xvar = avar(x) * (len(x)-1) / float(len(x))
    yvar = avar(y) * (len(y)-1) / float(len(y))
    meandiff = amean(x) - amean(y)
    return (2 * covar) / ((xvar + yvar) + meandiff**2)
def apearsonr(x,y,verbose=1):
    """
    Calculates a Pearson correlation coefficient and returns p.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (2nd), p.195.

    Usage:   apearsonr(x,y,verbose=1)   where x,y are equal length arrays
    Returns: Pearson's r, two-tailed p-value
    """
    TINY = 1.0e-20
    n = len(x)
    # computational formula for r from raw sums
    # (dead locals xmean/ymean were removed -- they were never used)
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = (r_num / r_den)
    df = n-2
    # significance via Student's t, converted to p through incomplete beta
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t),verbose)
    return r,prob
def aspearmanr(x,y):
    """
    Calculates a Spearman rank-order correlation coefficient.  Taken
    from Heiman's Basic Statistics for the Behav. Sci (1st), p.192.

    Usage:   aspearmanr(x,y)   where x,y are equal-length arrays
    Returns: Spearman's r, two-tailed p-value
    """
    # (an unused TINY constant was removed -- nothing here referenced it)
    n = len(x)
    rankx = rankdata(x)
    ranky = rankdata(y)
    # Spearman's formula from the squared rank differences
    dsq = N.add.reduce((rankx-ranky)**2)
    rs = 1 - 6*dsq / float(n*(n**2-1))
    t = rs * math.sqrt((n-2) / ((rs+1.0)*(1.0-rs)))
    df = n-2
    probrs = abetai(0.5*df,0.5,df/(df+t*t))
# probability values for rs are from part 2 of the spearman function in
# Numerical Recipies, p.510.  They close to tables, but not exact.(?)
    return rs, probrs
def apointbiserialr(x,y):
    """
    Calculates a point-biserial correlation coefficient and the associated
    probability value.  Taken from Heiman's Basic Statistics for the Behav.
    Sci (1st), p.194.

    Usage:   apointbiserialr(x,y)      where x,y are equal length arrays
    Returns: Point-biserial r, two-tailed p-value
    Raises:  ValueError unless x contains exactly 2 distinct categories
    """
    TINY = 1e-30
    categories = pstat.aunique(x)
    data = pstat.aabut(x,y)
    if len(categories) != 2:
        raise ValueError("Exactly 2 categories required (in x) for pointbiserialr().")
    # there are 2 categories: split y by category and compare group means
    codemap = pstat.aabut(categories,N.arange(2))
    recoded = pstat.arecode(data,codemap,0)  # NOTE(review): unused downstream
    x = pstat.alinexand(data,0,categories[0])
    y = pstat.alinexand(data,0,categories[1])
    xmean = amean(pstat.acolex(x,1))
    ymean = amean(pstat.acolex(y,1))
    n = len(data)
    adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))
    rpb = (ymean - xmean)/asamplestdev(pstat.acolex(data,1))*adjust
    df = n-2
    t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    return rpb, prob
def akendalltau(x,y):
    """
    Calculates Kendall's tau ... correlation of ordinal data.  Adapted
    from function kendl1 in Numerical Recipies.  Needs good test-cases.@@@

    Usage:   akendalltau(x,y)
    Returns: Kendall's tau, two-tailed p-value
    """
    n1 = 0
    n2 = 0
    iss = 0
    for j in range(len(x)-1):
        # start at j+1: kendl1 compares distinct pairs only.  Starting at j
        # compared each element against itself (a1 == a2 == 0) and inflated
        # the tie counters.
        for k in range(j+1,len(y)):
            a1 = x[j] - x[k]
            a2 = y[j] - y[k]
            aa = a1 * a2
            if (aa):             # neither array has a tie for this pair
                n1 = n1 + 1
                n2 = n2 + 1
                if aa > 0:       # concordant pair
                    iss = iss + 1
                else:            # discordant pair
                    iss = iss - 1
            else:
                # per kendl1, n1 and n2 are incremented independently
                # (a pair tied in BOTH arrays counts toward neither)
                if (a1):
                    n1 = n1 + 1
                if (a2):
                    n2 = n2 + 1
    tau = iss / math.sqrt(n1*n2)
    svar = (4.0*len(x)+10.0) / (9.0*len(x)*(len(x)-1))
    z = tau / math.sqrt(svar)
    prob = erfcc(abs(z)/1.4142136)   # two-tailed normal probability
    return tau, prob
def alinregress(*args):
    """
    Calculates a regression line on two arrays, x and y, corresponding to x,y
    pairs.  If a single 2D array is passed, alinregress finds dim with 2 levels
    and splits data into x,y pairs along that dim.

    Usage:   alinregress(*args)    args=2 equal-length arrays, or one 2D array
    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20
    # unpack: either (x, y) or a single 2xN / Nx2 array
    if len(args) == 1:
        args = args[0]
        if len(args) == 2:
            x = args[0]
            y = args[1]
        else:
            x = args[:,0]
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    n = len(x)
    xmean = amean(x)
    ymean = amean(y)
    # computational formula for r from raw sums
    r_num = n*(N.add.reduce(x*y)) - N.add.reduce(x)*N.add.reduce(y)
    r_den = math.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y)-asquare_of_sums(y)))
    r = r_num / r_den
    # (a dead Fisher-z computation, 'z', was removed here -- never used)
    df = n-2
    t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    slope = r_num / (float(n)*ass(x) - asquare_of_sums(x))
    intercept = ymean - slope*xmean
    sterrest = math.sqrt(1-r*r)*asamplestdev(y)
    return slope, intercept, r, prob, sterrest, n
def amasslinregress(*args):
    """
    Calculates a regression line on one 1D array (x) and one N-D array (y).

    Returns: slope, intercept, r, two-tailed prob, sterr-of-the-estimate, n
    """
    TINY = 1.0e-20
    # unpack: either (x, y) or a single 2-element container
    if len(args) == 1:
        args = args[0]
        if len(args) == 2:
            x = N.ravel(args[0])
            y = args[1]
        else:
            x = N.ravel(args[:,0])
            y = args[:,1]
    else:
        x = args[0]
        y = args[1]
    x = x.astype(N.float_)
    y = y.astype(N.float_)
    n = len(x)
    xmean = amean(x)
    ymean = amean(y,0)
    # reshape x to (n,1,...,1) so it broadcasts down y's first axis.
    # A plain int list replaces the old N.ones(...) float array, which is
    # rejected as a shape by numpy; a leftover debug print of the shapes
    # was also removed here.
    shp = [1] * len(y.shape)
    shp[0] = len(x)
    x.shape = shp
    r_num = n*(N.add.reduce(x*y,0)) - N.add.reduce(x)*N.add.reduce(y,0)
    r_den = N.sqrt((n*ass(x) - asquare_of_sums(x))*(n*ass(y,0)-asquare_of_sums(y,0)))
    zerodivproblem = N.equal(r_den,0)
    r_den = N.where(zerodivproblem,1,r_den)  # avoid zero-division in 1st place
    r = r_num / r_den  # need to do this nicely for matrix division
    r = N.where(zerodivproblem,0.0,r)
    df = n-2
    t = r*N.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
    prob = abetai(0.5*df,0.5,df/(df+t*t))
    ss = float(n)*ass(x)-asquare_of_sums(x)
    s_den = N.where(ss==0,1,ss)  # avoid zero-division in 1st place
    slope = r_num / s_den
    intercept = ymean - slope*xmean
    sterrest = N.sqrt(1-r*r)*asamplestdev(y,0)
    return slope, intercept, r, prob, sterrest, n
#####################################
##### AINFERENTIAL STATISTICS #####
#####################################
def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):
    """
    Calculates the t-obtained for the independent samples T-test on ONE group
    of scores a, given a population mean.  If printit=1, results are printed
    to the screen.  If printit='filename', the results are output to 'filename'
    using the given writemode (default=append).  Returns t-value, and prob.

    Usage:   attest_1samp(a,popmean,Name='Sample',printit=0,writemode='a')
    Returns: t-value, two-tailed prob
    """
    if type(a) != N.ndarray:
        a = N.array(a)               # coerce sequences to an array
    x = amean(a)                     # sample mean
    v = avar(a)                      # sample variance
    n = len(a)
    df = n-1                         # degrees of freedom
    svar = ((n-1)*v) / float(df)     # pooled-variance form (== v since df = n-1)
    t = (x-popmean)/math.sqrt(svar*(1.0/n))
    prob = abetai(0.5*df,0.5,df/(df+t*t))  # two-tailed p via incomplete beta
    if printit <> 0:                 # NOTE: '<>' is Python-2-only syntax
        statname = 'Single-sample T-test.'
        outputpairedstats(printit,writemode,
                          'Population','--',popmean,0,0,0,
                          name,n,x,v,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          statname,t,prob)
    return t,prob
def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO INDEPENDENT samples of scores
    a, and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_ind (a,b,dimension=None,printit=0,
                         Name1='Samp1',Name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    x1 = amean(a,dimension)
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n1 = a.shape[dimension]
    n2 = b.shape[dimension]
    df = n1+n2-2
    svar = ((n1-1)*v1+(n2-1)*v2) / float(df)   # pooled variance
    zerodivproblem = N.equal(svar,0)
    svar = N.where(zerodivproblem,1,svar)  # avoid zero-division in 1st place
    t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2))  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)  # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]             # unwrap single-element result
    if printit <> 0:                 # NOTE: '<>' is Python-2-only syntax
        if type(t) == N.ndarray:
            t = t[0]
        if type(probs) == N.ndarray:
            probs = probs[0]
        statname = 'Independent samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): the printing path returns None, not (t, probs) --
        # callers that print get no statistics back; confirm this is intended.
        return
    return t, probs
def ap2t(pval,df):
"""
Tries to compute a t-value from a p-value (or pval array) and associated df.
SLOW for large numbers of elements(!) as it re-computes p-values 20 times
(smaller step-sizes) at which point it decides it's done. Keeps the signs
of the input array. Returns 1000 (or -1000) if t>100.
Usage: ap2t(pval,df)
Returns: an array of t-values with the shape of pval
"""
pval = N.array(pval)
signs = N.sign(pval)
pval = abs(pval)
t = N.ones(pval.shape,N.float_)*50
step = N.ones(pval.shape,N.float_)*25
print "Initial ap2t() prob calc"
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
print 'ap2t() iter: ',
for i in range(10):
print i,' ',
t = N.where(pval<prob,t+step,t-step)
prob = abetai(0.5*df,0.5,float(df)/(df+t*t))
step = step/2
print
# since this is an ugly hack, we get ugly boundaries
t = N.where(t>99.9,1000,t) # hit upper-boundary
t = t+signs
return t #, prob, pval
def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):
    """
    Calculates the t-obtained T-test on TWO RELATED samples of scores, a
    and b.  From Numerical Recipies, p.483.  If printit=1, results are
    printed to the screen.  If printit='filename', the results are output
    to 'filename' using the given writemode (default=append).  Dimension
    can equal None (ravel array first), or an integer (the dimension over
    which to operate on a and b).

    Usage:   attest_rel(a,b,dimension=None,printit=0,
                        name1='Samp1',name2='Samp2',writemode='a')
    Returns: t-value, two-tailed p-value
    """
    if dimension == None:
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    if len(a)<>len(b):               # NOTE: '<>' is Python-2-only syntax
        raise ValueError, 'Unequal length arrays.'
    x1 = amean(a,dimension)          # means/variances used only for printing
    x2 = amean(b,dimension)
    v1 = avar(a,dimension)
    v2 = avar(b,dimension)
    n = a.shape[dimension]
    df = float(n-1)
    d = (a-b).astype('d')            # paired differences, as double
    denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)
    zerodivproblem = N.equal(denom,0)
    denom = N.where(zerodivproblem,1,denom)  # avoid zero-division in 1st place
    t = N.add.reduce(d,dimension) / denom  # N-D COMPUTATION HERE!!!!!!
    t = N.where(zerodivproblem,1.0,t)  # replace NaN/wrong t-values with 1.0
    probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
    if type(t) == N.ndarray:
        probs = N.reshape(probs,t.shape)
    if probs.shape == (1,):
        probs = probs[0]             # unwrap single-element result
    if printit <> 0:
        statname = 'Related samples T-test.'
        outputpairedstats(printit,writemode,
                          name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),
                          N.maximum.reduce(N.ravel(a)),
                          name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),
                          N.maximum.reduce(N.ravel(b)),
                          statname,t,probs)
        # NOTE(review): the printing path returns None -- confirm intended.
        return
    return t, probs
def achisquare(f_obs,f_exp=None):
    """
    Calculates a one-way chi square for an array of observed frequencies and
    returns the chi-square statistic with its associated right-tail p-value.
    If no expected frequencies are given, the total N is assumed to be
    equally distributed across all groups.

    Usage:   achisquare(f_obs, f_exp=None)   f_obs = array of observed cell freq.
    Returns: chisquare-statistic, associated p-value
    """
    k = len(f_obs)                   # number of cells
    # BUGFIX: use 'is None' -- comparing an ndarray to None with '==' is an
    # elementwise operation, which makes the 'if' ambiguous under modern numpy.
    if f_exp is None:
        f_exp = N.array([sum(f_obs)/float(k)] * len(f_obs),N.float_)
    f_exp = f_exp.astype(N.float_)
    chisq = N.add.reduce((f_obs-f_exp)**2 / f_exp)   # sum of (O-E)^2/E
    return chisq, achisqprob(chisq, k-1)
def aks_2samp (data1,data2):
    """
    Computes the Kolmogorov-Smirnof statistic on 2 samples.  Modified from
    Numerical Recipies in C, page 493.  Returns KS D-value, prob.  Not ufunc-
    like.

    Usage:   aks_2samp(data1,data2)  where data1 and data2 are 1D arrays
    Returns: KS D-value, p-value
    """
    j1 = 0    # N.zeros(data1.shape[1:]) TRIED TO MAKE THIS UFUNC-LIKE
    j2 = 0    # N.zeros(data2.shape[1:])
    fn1 = 0.0 # N.zeros(data1.shape[1:],N.float_)
    fn2 = 0.0 # N.zeros(data2.shape[1:],N.float_)
    n1 = data1.shape[0]
    n2 = data2.shape[0]
    en1 = n1*1
    en2 = n2*1
    d = N.zeros(data1.shape[1:],N.float_)   # largest signed CDF gap so far
    data1 = N.sort(data1,0)
    data2 = N.sort(data2,0)
    # Merge-walk the two sorted samples; fn1/fn2 track the two empirical
    # CDFs and d records the largest gap between them (the KS statistic).
    while j1 < n1 and j2 < n2:
        d1=data1[j1]
        d2=data2[j2]
        if d1 <= d2:
            fn1 = (j1)/float(en1)
            j1 = j1 + 1
        if d2 <= d1:                 # both branches fire on exact ties
            fn2 = (j2)/float(en2)
            j2 = j2 + 1
        dt = (fn2-fn1)
        if abs(dt) > abs(d):
            d = dt
#    try:
    en = math.sqrt(en1*en2/float(en1+en2))  # effective sample size
    prob = aksprob((en+0.12+0.11/en)*N.fabs(d))  # NR asymptotic p-value
#    except:
#        prob = 1.0
    return d, prob
def amannwhitneyu(x,y):
    """
    Calculates a Mann-Whitney U statistic on the provided scores and
    returns the result.  Use only when the n in each condition is < 20 and
    you have 2 independent samples of ranks.  REMEMBER: Mann-Whitney U is
    significant if the u-obtained is LESS THAN or equal to the critical
    value of U.

    Usage:   amannwhitneyu(x,y)     where x,y are arrays of values for 2 conditions
    Returns: u-statistic, one-tailed p-value (i.e., p(z(U)))
    """
    n1 = len(x)
    n2 = len(y)
    ranked = rankdata(N.concatenate((x,y)))  # pooled ranks (list-version fcn)
    rankx = ranked[0:n1]       # get the x-ranks
    ranky = ranked[n1:]        # the rest are y-ranks
    u1 = n1*n2 + (n1*(n1+1))/2.0 - sum(rankx)  # calc U for x
    u2 = n1*n2 - u1                            # remainder is U for y
    bigu = max(u1,u2)
    smallu = min(u1,u2)
    T = math.sqrt(tiecorrect(ranked))  # correction factor for tied scores
    if T == 0:                 # every score identical => U undefined
        raise ValueError, 'All numbers are identical in amannwhitneyu'
    sd = math.sqrt(T*n1*n2*(n1+n2+1)/12.0)
    z = abs((bigu-n1*n2/2.0) / sd)  # normal approximation for prob calc
    return smallu, 1.0 - azprob(z)
def atiecorrect(rankvals):
    """
    Tie-corrector for ties in Mann Whitney U and Kruskal Wallis H tests.
    See Siegel, S. (1956) Nonparametric Statistics for the Behavioral
    Sciences.  New York: McGraw-Hill.  Code adapted from |Stat rankind.c
    code.

    Usage:   atiecorrect(rankvals)
    Returns: T correction factor for U or H
    """
    sorted,posn = ashellsort(N.array(rankvals))  # NOTE: shadows builtin 'sorted'
    n = len(sorted)
    T = 0.0
    i = 0
    while (i<n-1):
        if sorted[i] == sorted[i+1]:   # start of a run of tied values
            nties = 1
            while (i<n-1) and (sorted[i] == sorted[i+1]):
                nties = nties +1
                i = i +1
            T = T + nties**3 - nties   # each run of k ties contributes k^3 - k
        i = i+1
    T = T / float(n**3-n)
    return 1.0 - T
def aranksums(x,y):
    """
    Wilcoxon rank-sums test (normal approximation) for two independent
    samples of scores.

    Usage:   aranksums(x,y)     where x,y are arrays of values for 2 conditions
    Returns: z-statistic, two-tailed p-value
    """
    nx = len(x)
    ny = len(y)
    # Rank the pooled data, then take the rank sum of the first sample.
    pooled_ranks = arankdata(N.concatenate((x,y)))
    ranks_x = pooled_ranks[:nx]
    rank_sum = sum(ranks_x)
    # Mean and standard deviation of the rank sum under the null hypothesis.
    mu = nx*(nx+ny+1) / 2.0
    sigma = math.sqrt(nx*ny*(nx+ny+1)/12.0)
    z = (rank_sum - mu) / sigma
    prob = 2*(1.0 - azprob(abs(z)))
    return z, prob
def awilcoxont(x,y):
    """
    Calculates the Wilcoxon T-test for related samples and returns the
    result.  A non-parametric T-test.

    Usage:   awilcoxont(x,y)     where x,y are equal-length arrays for 2 conditions
    Returns: t-statistic, two-tailed p-value
    """
    if len(x) != len(y):
        raise ValueError('Unequal N in awilcoxont.  Aborting.')
    d = x-y
    d = N.compress(N.not_equal(d,0),d)  # Keep all non-zero differences
    count = len(d)
    absd = abs(d)
    absranked = arankdata(absd)         # rank the absolute differences
    r_plus = 0.0
    r_minus = 0.0
    for i in range(len(absd)):
        if d[i] < 0:
            r_minus = r_minus + absranked[i]
        else:
            r_plus = r_plus + absranked[i]
    wt = min(r_plus, r_minus)           # Wilcoxon T = smaller rank sum
    mn = count * (count+1) * 0.25       # mean of T under H0
    se =  math.sqrt(count*(count+1)*(2.0*count+1.0)/24.0)
    # BUGFIX: the z computation was duplicated on two consecutive lines;
    # compute it once.
    z = math.fabs(wt-mn) / se
    # NOTE(review): this calls the list-version zprob (via the Dispatch
    # table) rather than azprob; works here because z is a scalar -- confirm.
    prob = 2*(1.0 -zprob(abs(z)))
    return wt, prob
def akruskalwallish(*args):
    """
    The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
    groups, requiring at least 5 subjects in each group.  This function
    calculates the Kruskal-Wallis H and associated p-value for 3 or more
    independent samples.

    Usage:   akruskalwallish(*args)     args are separate arrays for 3+ conditions
    Returns: H-statistic (corrected for ties), associated p-value
    """
    # BUGFIX: the docstring promises "3 or more" groups but the old assert
    # demanded exactly 3; accept any count >= 3.
    assert len(args) >= 3, "Need at least 3 groups in stats.akruskalwallish()"
    args = list(args)
    n = [len(g) for g in args]          # per-group sample sizes
    all = []
    for i in range(len(args)):
        all = all + args[i].tolist()    # pool every score into one list
    ranked = rankdata(all)              # rank pooled data (list-version fcn)
    T = tiecorrect(ranked)              # tie-correction factor
    for i in range(len(args)):
        args[i] = ranked[0:n[i]]        # peel each group's ranks back off
        del ranked[0:n[i]]
    rsums = []
    for i in range(len(args)):
        rsums.append(sum(args[i])**2)
        rsums[i] = rsums[i] / float(n[i])   # (rank sum)^2 / n per group
    ssbn = sum(rsums)
    totaln = sum(n)
    h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
    df = len(args) - 1
    if T == 0:                          # every score identical => H undefined
        raise ValueError('All numbers are identical in akruskalwallish')
    h = h / float(T)                    # apply tie correction
    return h, chisqprob(h,df)
def afriedmanchisquare(*args):
    """
    Friedman Chi-Square is a non-parametric, one-way within-subjects
    ANOVA.  This function calculates the Friedman Chi-square test for
    repeated measures and returns the result, along with the associated
    probability value.  It assumes 3 or more repeated measures.  Only 3
    levels requires a minimum of 10 subjects in the study.  Four levels
    requires 5 subjects per level(??).

    Usage:   afriedmanchisquare(*args)   args are separate arrays for 2+ conditions
    Returns: chi-square statistic, associated p-value
    """
    k = len(args)                        # number of conditions/levels
    if k < 3:
        raise ValueError, '\nLess than 3 levels.  Friedman test not appropriate.\n'
    n = len(args[0])                     # number of subjects
    data = apply(pstat.aabut,args)       # py2 apply(): column-bind conditions
    data = data.astype(N.float_)
    for i in range(len(data)):
        data[i] = arankdata(data[i])     # rank within each subject (row)
    ssbn = asum(asum(args,1)**2)
    # NOTE(review): ssbn sums the *raw* args rather than the ranked 'data'
    # computed just above -- looks inconsistent with the Friedman formula;
    # confirm before relying on this result.
    chisq = 12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)
    return chisq, achisqprob(chisq,k-1)
#####################################
#### APROBABILITY CALCULATIONS ####
#####################################
def achisqprob(chisq,df):
    """
    Returns the (1-tail) probability value associated with the provided chi-square
    value and df.  Heavily modified from chisq.c in Gary Perlman's |Stat.  Can
    handle multiple dimensions.

    Usage:   achisqprob(chisq,df)    chisq=chisquare stat., df=degrees of freedom
    """
    BIG = 200.0
    def ex(x):
        # exp() with the argument clipped below at -BIG to avoid underflow
        BIG = 200.0
        exponents = N.where(N.less(x,-BIG),-BIG,x)
        return N.exp(exponents)

    if type(chisq) == N.ndarray:
        arrayflag = 1
    else:
        arrayflag = 0
        chisq = N.array([chisq])         # scalars are wrapped in a 1-elt array
    if df < 1:
        # NOTE(review): N.float here (not N.float_) is a deprecated alias
        return N.ones(chisq.shape,N.float)
    probs = N.zeros(chisq.shape,N.float_)
    probs = N.where(N.less_equal(chisq,0),1.0,probs)  # set prob=1 for chisq<0
    a = 0.5 * chisq
    if df > 1:
        y = ex(-a)
    if df%2 == 0:
        even = 1
        s = y*1
        s2 = s*1
    else:
        even = 0
        s = 2.0 * azprob(-N.sqrt(chisq))  # odd df seeded from the normal tail
        s2 = s*1
    if (df > 2):
        # Series evaluation; elements are "frozen" into the result arrays as
        # they finish, with 'mask' marking which ones are done.  Two separate
        # accumulations handle large a (> BIG) and ordinary a.
        chisq = 0.5 * (df - 1.0)          # re-used as the series length bound
        if even:
            z = N.ones(probs.shape,N.float_)
        else:
            z = 0.5 *N.ones(probs.shape,N.float_)
        if even:
            e = N.zeros(probs.shape,N.float_)
        else:
            e = N.log(N.sqrt(N.pi)) *N.ones(probs.shape,N.float_)
        c = N.log(a)
        mask = N.zeros(probs.shape)
        a_big = N.greater(a,BIG)
        a_big_frozen = -1 *N.ones(probs.shape,N.float_)
        totalelements = N.multiply.reduce(N.array(probs.shape))
        while asum(mask)<>totalelements:  # loop until every element is frozen
            e = N.log(z) + e
            s = s + ex(c*z-a-e)
            z = z + 1.0
#            print z, e, s
            newmask = N.greater(z,chisq)
            a_big_frozen = N.where(newmask*N.equal(mask,0)*a_big, s, a_big_frozen)
            mask = N.clip(newmask+mask,0,1)
        if even:                          # reset accumulators for the a<=BIG pass
            z = N.ones(probs.shape,N.float_)
            e = N.ones(probs.shape,N.float_)
        else:
            z = 0.5 *N.ones(probs.shape,N.float_)
            e = 1.0 / N.sqrt(N.pi) / N.sqrt(a) * N.ones(probs.shape,N.float_)
        c = 0.0
        mask = N.zeros(probs.shape)
        a_notbig_frozen = -1 *N.ones(probs.shape,N.float_)
        while asum(mask)<>totalelements:
            e = e * (a/z.astype(N.float_))
            c = c + e
            z = z + 1.0
#            print '#2', z, e, c, s, c*y+s2
            newmask = N.greater(z,chisq)
            a_notbig_frozen = N.where(newmask*N.equal(mask,0)*(1-a_big),
                                      c*y+s2, a_notbig_frozen)
            mask = N.clip(newmask+mask,0,1)
        # Combine: keep 1.0 where chisq<=0, else pick the appropriate series.
        probs = N.where(N.equal(probs,1),1,
                        N.where(N.greater(a,BIG),a_big_frozen,a_notbig_frozen))
        return probs
    else:
        return s                          # df of 1 or 2: closed-form seed value
def aerfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional error
    everywhere less than 1.2e-7.  Adapted from Numerical Recipies.  Can
    handle multiple dimensions.

    Usage:   aerfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0+0.5*z)
    # Horner evaluation of the NR Chebyshev-fitted polynomial in t.
    poly = 0.17087277
    for coeff in (-0.82215223, 1.48851587, -1.13520398, 0.27886807,
                  -0.18628806, 0.09678418, 0.37409196, 1.00002368):
        poly = coeff + t*poly
    ans = t * N.exp(-z*z - 1.26551223 + t*poly)
    # erfc(-x) = 2 - erfc(x) for negative arguments.
    return N.where(N.greater_equal(x,0), ans, 2.0-ans)
def azprob(z):
    """
    Returns the area under the normal curve 'to the left of' the given z value.
    Thus,
        for z<0, zprob(z) = 1-tail probability
        for z>0, 1.0-zprob(z) = 1-tail probability
        for any z, 2.0*(1.0-zprob(abs(z))) = 2-tail probability
    Adapted from z.c in Gary Perlman's |Stat.  Can handle multiple dimensions.

    Usage:   azprob(z)    where z is a z-value
    """
    def yfunc(y):
        # tail region (|z|/2 >= 1): polynomial in y = |z|/2 - 2
        x = (((((((((((((-0.000045255659 * y
                         +0.000152529290) * y -0.000019538132) * y
                       -0.000676904986) * y +0.001390604284) * y
                     -0.000794620820) * y -0.002034254874) * y
                   +0.006549791214) * y -0.010557625006) * y
                 +0.011630447319) * y -0.009279453341) * y
               +0.005353579108) * y -0.002141268741) * y
             +0.000535310849) * y +0.999936657524
        return x

    def wfunc(w):
        # central region (|z|/2 < 1): polynomial in w = (|z|/2)**2
        x = ((((((((0.000124818987 * w
                    -0.001075204047) * w +0.005198775019) * w
                  -0.019198292004) * w +0.059054035642) * w
                -0.151968751364) * w +0.319152932694) * w
              -0.531923007300) * w +0.797884560593) * N.sqrt(w) * 2.0
        return x

    Z_MAX = 6.0    # maximum meaningful z-value
    # BUGFIX: removed the dead 'x = N.zeros(z.shape,N.float_)' initializer --
    # x was overwritten unconditionally below, the line broke plain-scalar
    # input (no .shape), and N.float_ was removed in NumPy 2.0.
    y = 0.5 * N.fabs(z)
    x = N.where(N.less(y,1.0),wfunc(y*y),yfunc(y-2.0))  # get x's
    x = N.where(N.greater(y,Z_MAX*0.5),1.0,x)           # kill those with big Z
    prob = N.where(N.greater(z,0),(x+1)*0.5,(1-x)*0.5)  # fold to left-tail area
    return prob
def aksprob(alam):
    """
    Returns the probability value for a K-S statistic computed via ks_2samp.
    Adapted from Numerical Recipies.  Can handle multiple dimensions.

    Usage:   aksprob(alam)
    """
    if type(alam) == N.ndarray:
        frozen = -1 *N.ones(alam.shape,N.float64)  # -1 marks "not yet converged"
        alam = alam.astype(N.float64)
        arrayflag = 1
    else:
        frozen = N.array(-1.)
        alam = N.array(alam,N.float64)
        # NOTE(review): arrayflag is set to 1 in BOTH branches, so the
        # scalar-unwrapping return path below is dead code -- confirm.
        arrayflag = 1
    mask = N.zeros(alam.shape)        # 1 where an element has converged
    fac = 2.0 *N.ones(alam.shape,N.float_)   # alternating-series sign/scale
    sum = N.zeros(alam.shape,N.float_)       # NOTE: shadows builtin 'sum'
    termbf = N.zeros(alam.shape,N.float_)    # |previous term|, for convergence test
    a2 = N.array(-2.0*alam*alam,N.float64)
    totalelements = N.multiply.reduce(N.array(mask.shape))
    for j in range(1,201):            # at most 200 terms of the KS series
        if asum(mask) == totalelements:
            break                     # everything converged
        exponents = (a2*j*j)
        overflowmask = N.less(exponents,-746)  # exp() would underflow
        frozen = N.where(overflowmask,0,frozen)
        mask = mask+overflowmask
        term = fac*N.exp(exponents)
        sum = sum + term
        # Converged when the new term is negligible vs. the last term or sum.
        newmask = N.where(N.less_equal(abs(term),(0.001*termbf)) +
                          N.less(abs(term),1.0e-8*sum), 1, 0)
        frozen = N.where(newmask*N.equal(mask,0), sum, frozen)
        mask = N.clip(mask+newmask,0,1)
        fac = -fac                    # alternate series sign
        termbf = abs(term)
    if arrayflag:
        return N.where(N.equal(frozen,-1), 1.0, frozen)  # 1.0 if doesn't converge
    else:
        return N.where(N.equal(frozen,-1), 1.0, frozen)[0]  # 1.0 if doesn't converge
def afprob (dfnum, dfden, F):
    """
    Returns the 1-tailed significance level (p-value) of an F statistic
    given the degrees of freedom for the numerator (dfR-dfF) and the degrees
    of freedom for the denominator (dfF).  Can handle multiple dims for F.

    Usage:   afprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    # Transform F into the incomplete-beta argument; the only difference
    # between the two branches is how the denominator is floated.
    if type(F) == N.ndarray:
        x = dfden/(1.0*dfden+dfnum*F)
    else:
        x = dfden/float(dfden+dfnum*F)
    return abetai(0.5*dfden, 0.5*dfnum, x)
def abetacf(a,b,x,verbose=1):
    """
    Evaluates the continued fraction form of the incomplete Beta function,
    betai.  (Adapted from: Numerical Recipies in C.)  Can handle multiple
    dimensions for x.

    Usage:   abetacf(a,b,x,verbose=1)
    """
    ITMAX = 200     # maximum continued-fraction iterations
    EPS = 3.0e-7    # per-element relative convergence tolerance
    arrayflag = 1
    if type(x) == N.ndarray:
        frozen = N.ones(x.shape,N.float_) *-1  #start out w/ -1s, should replace all
    else:
        arrayflag = 0
        frozen = N.array([-1])
        x = N.array([x])
    mask = N.zeros(x.shape)            # 1 where an element has converged
    bm = az = am = 1.0
    qab = a+b
    qap = a+1.0
    qam = a-1.0
    bz = 1.0-qab*x/qap
    for i in range(ITMAX+1):
        if N.sum(N.ravel(N.equal(frozen,-1)))==0:
            break                      # everything converged
        em = float(i+1)
        tem = em + em
        d = em*(b-em)*x/((qam+tem)*(a+tem))         # even recurrence step
        ap = az + d*am
        bp = bz+d*bm
        d = -(a+em)*(qab+em)*x/((qap+tem)*(a+tem))  # odd recurrence step
        app = ap+d*az
        bpp = bp+d*bz
        aold = az*1
        am = ap/bpp                    # renormalize to keep terms bounded
        bm = bp/bpp
        az = app/bpp
        bz = 1.0
        newmask = N.less(abs(az-aold),EPS*abs(az))  # converged this pass?
        frozen = N.where(newmask*N.equal(mask,0), az, frozen)
        mask = N.clip(mask+newmask,0,1)
    noconverge = asum(N.equal(frozen,-1))
    if noconverge <> 0 and verbose:    # NOTE: '<>' is Python-2-only syntax
        print 'a or b too big, or ITMAX too small in Betacf for ',noconverge,' elements'
    if arrayflag:
        return frozen
    else:
        return frozen[0]
def agammln(xx):
    """
    Returns the natural log of the gamma function of xx:
        Gamma(z) = Integral(0,infinity) of t^(z-1)exp(-t) dt.
    Adapted from: Numerical Recipies in C (Lanczos approximation).  Can
    handle multiple dims ... but probably doesn't normally have to.

    Usage:   agammln(xx)
    """
    LANCZOS_COEFFS = (76.18009173, -86.50532033, 24.01409822,
                      -1.231739516, 0.120858003e-2, -0.536382e-5)
    shifted = xx - 1.0
    tail = shifted + 5.5
    tail = tail - (shifted + 0.5) * N.log(tail)
    # Accumulate the Lanczos series, dividing each coefficient by
    # successively incremented arguments.
    series = 1.0
    denom = shifted
    for coeff in LANCZOS_COEFFS:
        denom = denom + 1
        series = series + coeff / denom
    return -tail + N.log(2.50662827465 * series)
def abetai(a,b,x,verbose=1):
    """
    Returns the incomplete beta function:

        I-sub-x(a,b) = 1/B(a,b)*(Integral(0,x) of t^(a-1)(1-t)^(b-1) dt)

    where a,b>0 and B(a,b) = G(a)*G(b)/(G(a+b)) where G(a) is the gamma
    function of a.  The continued fraction formulation is implemented
    here, using the betacf function.  (Adapted from: Numerical Recipies in
    C.)  Can handle multiple dimensions.

    Usage:   abetai(a,b,x,verbose=1)
    """
    TINY = 1e-15     # nudges x away from the log(0) endpoints below
    if type(a) == N.ndarray:
        if asum(N.less(x,0)+N.greater(x,1)) <> 0:   # '<>' is py2-only syntax
            raise ValueError, 'Bad x in abetai'
    x = N.where(N.equal(x,0),TINY,x)
    x = N.where(N.equal(x,1.0),1-TINY,x)

    # NOTE(review): this first assignment to bt is dead -- it is
    # unconditionally overwritten by the exp() below; confirm.
    bt = N.where(N.equal(x,0)+N.equal(x,1), 0, -1)
    exponents = ( gammln(a+b)-gammln(a)-gammln(b)+a*N.log(x)+b*
                  N.log(1.0-x) )
    # 746 (below) is the MAX POSSIBLE BEFORE OVERFLOW
    exponents = N.where(N.less(exponents,-740),-740,exponents)
    bt = N.exp(exponents)
    if type(x) == N.ndarray:
        # Use the continued fraction directly below the symmetry point,
        # and the reflection formula above it.
        ans = N.where(N.less(x,(a+1)/(a+b+2.0)),
                      bt*abetacf(a,b,x,verbose)/float(a),
                      1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b))
    else:
        if x<(a+1)/(a+b+2.0):
            ans = bt*abetacf(a,b,x,verbose)/float(a)
        else:
            ans = 1.0-bt*abetacf(b,a,1.0-x,verbose)/float(b)
    return ans
#####################################
####### AANOVA CALCULATIONS #######
#####################################
import LinearAlgebra, operator
LA = LinearAlgebra
def aglm(data,para):
    """
    Calculates a linear model fit ... anova/ancova/lin-regress/t-test/etc.  Taken
    from:
        Peterson et al. Statistical limitations in functional neuroimaging
        I. Non-inferential methods and statistical models.  Phil Trans Royal Soc
        Lond B 354: 1239-1260.

    Usage:   aglm(data,para)
    Returns: statistic, p-value ???
    """
    if len(para) <> len(data):       # NOTE: '<>' is Python-2-only syntax
        print "data and para must be same length in aglm"
        return
    n = len(para)
    p = pstat.aunique(para)          # distinct condition labels
    x = N.zeros((n,len(p)))  # design matrix
    for l in range(len(p)):
        x[:,l] = N.equal(para,p[l])  # one indicator column per condition
    b = N.dot(N.dot(LA.inv(N.dot(N.transpose(x),x)),  # i.e., b=inv(X'X)X'Y
                    N.transpose(x)),
              data)
    diffs = (data - N.dot(x,b))      # residuals
    s_sq = 1./(n-len(p)) * N.dot(N.transpose(diffs), diffs)  # residual variance
    if len(p) == 2:  # ttest_ind
        c = N.array([1,-1])          # contrast: condition1 - condition2
        df = n-2
        fact = asum(1.0/asum(x,0))  # i.e., 1/n1 + 1/n2 + 1/n3 ...
        t = N.dot(c,b) / N.sqrt(s_sq*fact)
        probs = abetai(0.5*df,0.5,float(df)/(df+t*t))
        return t, probs
    # NOTE(review): with more than 2 conditions this falls off the end and
    # implicitly returns None -- confirm that is intended.
def aF_oneway(*args):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups.  From Heiman, pp.394-7.

    Usage:   aF_oneway (*args)    where *args is 2 or more arrays, one per
                                  treatment group
    Returns: f-value, probability
    """
    # CLEANUP: removed the dead [0]*na pre-initializations of means/vars/ns
    # and the unused means/vars computations -- only the sums of squares
    # below contribute to the result.
    na = len(args)               # ANOVA on 'na' groups, each in its own array
    alldata = N.concatenate(args)
    bign = len(alldata)
    # Total sum of squares about the grand mean.
    sstot = ass(alldata)-(asquare_of_sums(alldata)/float(bign))
    # Between-groups sum of squares.
    ssbn = 0
    for a in args:
        ssbn = ssbn + asquare_of_sums(N.array(a))/float(len(a))
    ssbn = ssbn - (asquare_of_sums(alldata)/float(bign))
    sswn = sstot-ssbn            # within-groups sum of squares
    dfbn = na-1
    dfwn = bign - na
    msb = ssbn/float(dfbn)       # mean squares
    msw = sswn/float(dfwn)
    f = msb/msw
    prob = fprob(dfbn,dfwn,f)
    return f, prob
def aF_value (ER,EF,dfR,dfF):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR = degrees of freedom the Restricted model
        dfF = degrees of freedom associated with the Restricted model
    """
    # F = (error reduction per df gained) / (full-model error per df).
    numerator = (ER-EF)/float(dfR-dfF)
    denominator = EF/float(dfF)
    return numerator / denominator
def outputfstats(Enum, Eden, dfnum, dfden, f, prob):
    """
    Pretty-prints an F-test summary (sums of squares, dfs, mean squares,
    F-value and p, with significance stars) via pstat.printcc.
    """
    Enum = round(Enum,3)
    Eden = round(Eden,3)
    # BUGFIX: dfnum was previously rounded from Enum (copy/paste error),
    # clobbering the numerator df with the numerator sum of squares.
    dfnum = round(dfnum,3)
    dfden = round(dfden,3)
    f = round(f,3)
    prob = round(prob,3)
    suffix = ''                       # for *s after the p-value
    if  prob < 0.001:  suffix = ' ***'
    elif prob < 0.01:  suffix = ' **'
    elif prob < 0.05:  suffix = ' *'
    title = [['EF/ER','DF','Mean Square','F-value','prob','']]
    lofl = title+[[Enum, dfnum, round(Enum/float(dfnum),3), f, prob, suffix],
                  [Eden, dfden, round(Eden/float(dfden),3),'','','']]
    pstat.printcc(lofl)
    return
def F_value_multivariate(ER, EF, dfnum, dfden):
    """
    Returns an F-statistic given the following:
        ER  = error associated with the null hypothesis (the Restricted model)
        EF  = error associated with the alternate hypothesis (the Full model)
        dfR = degrees of freedom the Restricted model
        dfF = degrees of freedom associated with the Restricted model
    where ER and EF are matrices from a multivariate F calculation.
    """
    # Scalars are promoted to 1x1 matrices so LA.det applies uniformly.
    # NOTE: IntType/FloatType come from the Python-2 'types' module.
    if type(ER) in [IntType, FloatType]:
        ER = N.array([[ER]])
    if type(EF) in [IntType, FloatType]:
        EF = N.array([[EF]])
    n_um = (LA.det(ER) - LA.det(EF)) / float(dfnum)   # determinant-based SS
    d_en = LA.det(EF) / float(dfden)
    return n_um / d_en
#####################################
####### ASUPPORT FUNCTIONS ########
#####################################
def asign(a):
    """
    Elementwise sign of a: -1 where a<0, 0 where a==0, +1 where a>0.

    Usage:   asign(a)
    Returns: array with the shape of a, holding -1/0/+1 per element
    """
    a = N.asarray(a)
    # CLEANUP: removed the dead branch that tested type(a) against plain
    # float/int -- after N.asarray() the argument is always an ndarray,
    # so that branch could never be taken.
    return N.zeros(N.shape(a))-N.less(a,0)+N.greater(a,0)
def asum (a, dimension=None,keepdims=0):
    """
    An alternative to the Numeric.add.reduce function, which allows one to
    (1) collapse over multiple dimensions at once, and/or (2) to retain
    all dimensions in the original array (squashing one down to size.
    Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  If keepdims=1, the resulting array will have as many
    dimensions as the input array.

    Usage:   asum(a, dimension=None, keepdims=0)
    Returns: array summed along 'dimension'(s), same _number_ of dims if keepdims=1
    """
    # Upcast small integer dtypes to float to avoid overflow in the sum.
    if type(a) == N.ndarray and a.dtype in [N.int_, N.short, N.ubyte]:
        a = a.astype(N.float_)
    if dimension == None:
        s = N.sum(N.ravel(a))
    elif type(dimension) in [IntType,FloatType]:   # py2 'types' names
        s = N.add.reduce(a, dimension)
        if keepdims == 1:
            shp = list(a.shape)
            shp[dimension] = 1       # keep a size-1 axis in place of the sum
            s = N.reshape(s,shp)
    else: # must be a SEQUENCE of dims to sum over
        dims = list(dimension)
        dims.sort()
        dims.reverse()               # reduce highest axes first so indices stay valid
        s = a *1.0                   # float working copy
        for dim in dims:
            s = N.add.reduce(s,dim)
        if keepdims == 1:
            shp = list(a.shape)
            for dim in dims:
                shp[dim] = 1
            s = N.reshape(s,shp)
    return s
def acumsum (a,dimension=None):
    """
    Returns an array consisting of the cumulative sum of the items in the
    passed array.  Dimension can equal None (ravel array first), an
    integer (the dimension over which to operate), or a sequence (operate
    over multiple dimensions, but this last one just barely makes sense).

    Usage:   acumsum(a,dimension=None)
    """
    if dimension == None:
        a = N.ravel(a)
        dimension = 0
    # NOTE: ListType/TupleType come from the Python-2 'types' module.
    if type(dimension) in [ListType, TupleType, N.ndarray]:
        dimension = list(dimension)
        dimension.sort()
        dimension.reverse()          # accumulate over highest axes first
        for d in dimension:
            a = N.add.accumulate(a,d)
        return a
    else:
        return N.add.accumulate(a,dimension)
def ass(inarray, dimension=None, keepdims=0):
    """
    Returns the sum of squares of the values in inarray.  (The unfortunate
    name is historical. :-)  Dimension can equal None (ravel array first),
    an integer (the dimension over which to operate), or a sequence
    (operate over multiple dimensions).  Set keepdims=1 to maintain the
    original number of dimensions.

    Usage:   ass(inarray, dimension=None, keepdims=0)
    Returns: sum-along-'dimension' for (inarray*inarray)
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    squares = inarray * inarray
    return asum(squares, dimension, keepdims)
def asummult (array1,array2,dimension=None,keepdims=0):
    """
    Sums the elementwise products of array1 and array2 along 'dimension'.
    Dimension can equal None (ravel both arrays first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  A trivial function, but included for completeness.

    Usage:   asummult(array1,array2,dimension=None,keepdims=0)
    """
    if dimension == None:
        array1 = N.ravel(array1)
        array2 = N.ravel(array2)
        dimension = 0
    products = array1 * array2
    return asum(products, dimension, keepdims)
def asquare_of_sums(inarray, dimension=None, keepdims=0):
    """
    Sums the values in inarray along 'dimension' and squares that sum.
    Dimension can equal None (ravel array first), an integer (the
    dimension over which to operate), or a sequence (operate over multiple
    dimensions).  If keepdims=1, the returned array will have the same
    NUMBER of dimensions as the original.

    Usage:   asquare_of_sums(inarray, dimension=None, keepdims=0)
    Returns: the square of the sum over dim(s) in dimension
    """
    if dimension == None:
        inarray = N.ravel(inarray)
        dimension = 0
    total = asum(inarray, dimension, keepdims)
    # Square via float to match the original's promotion behavior.
    if type(total) == N.ndarray:
        return total.astype(N.float_) * total
    else:
        return float(total) * total
def asumdiffsquared(a,b, dimension=None, keepdims=0):
    """
    Takes pairwise differences of the values in arrays a and b, squares
    these differences, and returns the sum of these squares.  Dimension
    can equal None (ravel array first), an integer (the dimension over
    which to operate), or a sequence (operate over multiple dimensions).
    keepdims=1 means the return shape = len(a.shape) = len(b.shape)

    Usage:   asumdiffsquared(a,b)
    Returns: sum[ravel(a-b)**2]
    """
    if dimension == None:
        # BUGFIX: the ravel result was previously assigned to an unused
        # local ('inarray'), so multi-dimensional input was summed over
        # axis 0 only instead of over all elements as documented.
        a = N.ravel(a)
        b = N.ravel(b)
        dimension = 0
    return asum((a-b)**2,dimension,keepdims)
def ashellsort(inarray):
    """
    Shellsort algorithm.  Sorts a 1D-array.

    Usage:   ashellsort(inarray)
    Returns: sorted-inarray, sorting-index-vector (for original array)
    """
    n = len(inarray)
    svec = inarray *1.0      # float working copy; the input is left untouched
    ivec = range(n)          # py2: a plain list of original positions
    gap = n/2   # integer division needed
    while gap >0:
        for i in range(gap,n):
            for j in range(i-gap,-1,-gap):
                while j>=0 and svec[j]>svec[j+gap]:
                    # Swap both the value and its original index in lockstep.
                    temp = svec[j]
                    svec[j] = svec[j+gap]
                    svec[j+gap] = temp
                    itemp = ivec[j]
                    ivec[j] = ivec[j+gap]
                    ivec[j+gap] = itemp
        gap = gap / 2  # integer division needed
    # svec is now sorted input vector, ivec has the order svec[i] = vec[ivec[i]]
    return svec, ivec
def arankdata(inarray):
    """
    Ranks the data in inarray, dealing with ties appropritely.  Assumes
    a 1D inarray.  Adapted from Gary Perlman's |Stat ranksort.

    Usage:   arankdata(inarray)
    Returns: array of length equal to inarray, containing rank scores
    """
    n = len(inarray)
    svec, ivec = ashellsort(inarray)   # sorted values + original positions
    sumranks = 0
    dupcount = 0
    newarray = N.zeros(n,N.float_)
    for i in range(n):
        sumranks = sumranks + i
        dupcount = dupcount + 1
        if i==n-1 or svec[i] <> svec[i+1]:   # end of a run of tied values
            averank = sumranks / float(dupcount) + 1   # mean rank, 1-based
            for j in range(i-dupcount+1,i+1):
                newarray[ivec[j]] = averank  # write rank back to orig. position
            sumranks = 0
            dupcount = 0
    return newarray
def afindwithin(data):
    """
    Returns a binary vector, 1=within-subject factor, 0=between.  Input
    equals the entire data array (i.e., column 1=random factor, last
    column = measured values.

    Usage:   afindwithin(data)     data in |Stat format
    """
    numfact = len(data[0])-2     # factor columns (excluding subject & score cols)
    withinvec = [0]*numfact
    for col in range(1,numfact+1):
        rows = pstat.linexand(data,col,pstat.unique(pstat.colex(data,1))[0])  # get 1 level of this factor
        if len(pstat.unique(pstat.colex(rows,0))) < len(rows):  # if fewer subjects than scores on this factor
            withinvec[col-1] = 1  # subjects repeat => within-subject factor
    return withinvec
#########################################################
#########################################################
###### RE-DEFINE DISPATCHES TO INCLUDE ARRAYS #########
#########################################################
#########################################################
## CENTRAL TENDENCY:
# Each Dispatch object routes the generic name (e.g. mean) to the list-based
# implementation (l-prefix) for list/tuple arguments and to the array-based
# implementation (a-prefix) for N.ndarray arguments.  Entries with only an
# array binding have no list-based counterpart.
geometricmean = Dispatch ( (lgeometricmean, (ListType, TupleType)),
                           (ageometricmean, (N.ndarray,)) )
harmonicmean = Dispatch ( (lharmonicmean, (ListType, TupleType)),
                          (aharmonicmean, (N.ndarray,)) )
mean = Dispatch ( (lmean, (ListType, TupleType)),
                  (amean, (N.ndarray,)) )
median = Dispatch ( (lmedian, (ListType, TupleType)),
                    (amedian, (N.ndarray,)) )
medianscore = Dispatch ( (lmedianscore, (ListType, TupleType)),
                         (amedianscore, (N.ndarray,)) )
mode = Dispatch ( (lmode, (ListType, TupleType)),
                  (amode, (N.ndarray,)) )
tmean = Dispatch ( (atmean, (N.ndarray,)) )
tvar = Dispatch ( (atvar, (N.ndarray,)) )
tstdev = Dispatch ( (atstdev, (N.ndarray,)) )
tsem = Dispatch ( (atsem, (N.ndarray,)) )

## VARIATION:
moment = Dispatch ( (lmoment, (ListType, TupleType)),
                    (amoment, (N.ndarray,)) )
variation = Dispatch ( (lvariation, (ListType, TupleType)),
                       (avariation, (N.ndarray,)) )
skew = Dispatch ( (lskew, (ListType, TupleType)),
                  (askew, (N.ndarray,)) )
kurtosis = Dispatch ( (lkurtosis, (ListType, TupleType)),
                      (akurtosis, (N.ndarray,)) )
describe = Dispatch ( (ldescribe, (ListType, TupleType)),
                      (adescribe, (N.ndarray,)) )

## DISTRIBUTION TESTS
# NOTE(review): the next three bind the array version for list input as
# well -- presumably no list-based versions exist; confirm.
skewtest = Dispatch ( (askewtest, (ListType, TupleType)),
                      (askewtest, (N.ndarray,)) )
kurtosistest = Dispatch ( (akurtosistest, (ListType, TupleType)),
                          (akurtosistest, (N.ndarray,)) )
normaltest = Dispatch ( (anormaltest, (ListType, TupleType)),
                        (anormaltest, (N.ndarray,)) )

## FREQUENCY STATS:
itemfreq = Dispatch ( (litemfreq, (ListType, TupleType)),
                      (aitemfreq, (N.ndarray,)) )
scoreatpercentile = Dispatch ( (lscoreatpercentile, (ListType, TupleType)),
                               (ascoreatpercentile, (N.ndarray,)) )
percentileofscore = Dispatch ( (lpercentileofscore, (ListType, TupleType)),
                               (apercentileofscore, (N.ndarray,)) )
histogram = Dispatch ( (lhistogram, (ListType, TupleType)),
                       (ahistogram, (N.ndarray,)) )
cumfreq = Dispatch ( (lcumfreq, (ListType, TupleType)),
                     (acumfreq, (N.ndarray,)) )
relfreq = Dispatch ( (lrelfreq, (ListType, TupleType)),
                     (arelfreq, (N.ndarray,)) )

## VARIABILITY:
obrientransform = Dispatch ( (lobrientransform, (ListType, TupleType)),
                             (aobrientransform, (N.ndarray,)) )
samplevar = Dispatch ( (lsamplevar, (ListType, TupleType)),
                       (asamplevar, (N.ndarray,)) )
samplestdev = Dispatch ( (lsamplestdev, (ListType, TupleType)),
                         (asamplestdev, (N.ndarray,)) )
signaltonoise = Dispatch( (asignaltonoise, (N.ndarray,)),)
var = Dispatch ( (lvar, (ListType, TupleType)),
                 (avar, (N.ndarray,)) )
stdev = Dispatch ( (lstdev, (ListType, TupleType)),
                   (astdev, (N.ndarray,)) )
sterr = Dispatch ( (lsterr, (ListType, TupleType)),
                   (asterr, (N.ndarray,)) )
sem = Dispatch ( (lsem, (ListType, TupleType)),
                 (asem, (N.ndarray,)) )
z = Dispatch ( (lz, (ListType, TupleType)),
               (az, (N.ndarray,)) )
zs = Dispatch ( (lzs, (ListType, TupleType)),
                (azs, (N.ndarray,)) )

## TRIMMING FCNS:
threshold = Dispatch( (athreshold, (N.ndarray,)),)
trimboth = Dispatch ( (ltrimboth, (ListType, TupleType)),
                      (atrimboth, (N.ndarray,)) )
trim1 = Dispatch ( (ltrim1, (ListType, TupleType)),
                   (atrim1, (N.ndarray,)) )

## CORRELATION FCNS:
paired = Dispatch ( (lpaired, (ListType, TupleType)),
                    (apaired, (N.ndarray,)) )
lincc = Dispatch ( (llincc, (ListType, TupleType)),
                   (alincc, (N.ndarray,)) )
pearsonr = Dispatch ( (lpearsonr, (ListType, TupleType)),
                      (apearsonr, (N.ndarray,)) )
spearmanr = Dispatch ( (lspearmanr, (ListType, TupleType)),
                       (aspearmanr, (N.ndarray,)) )
pointbiserialr = Dispatch ( (lpointbiserialr, (ListType, TupleType)),
                            (apointbiserialr, (N.ndarray,)) )
kendalltau = Dispatch ( (lkendalltau, (ListType, TupleType)),
                        (akendalltau, (N.ndarray,)) )
linregress = Dispatch ( (llinregress, (ListType, TupleType)),
                        (alinregress, (N.ndarray,)) )

## INFERENTIAL STATS:
ttest_1samp = Dispatch ( (lttest_1samp, (ListType, TupleType)),
                         (attest_1samp, (N.ndarray,)) )
ttest_ind = Dispatch ( (lttest_ind, (ListType, TupleType)),
                       (attest_ind, (N.ndarray,)) )
ttest_rel = Dispatch ( (lttest_rel, (ListType, TupleType)),
                       (attest_rel, (N.ndarray,)) )
chisquare = Dispatch ( (lchisquare, (ListType, TupleType)),
                       (achisquare, (N.ndarray,)) )
ks_2samp = Dispatch ( (lks_2samp, (ListType, TupleType)),
                      (aks_2samp, (N.ndarray,)) )
mannwhitneyu = Dispatch ( (lmannwhitneyu, (ListType, TupleType)),
                          (amannwhitneyu, (N.ndarray,)) )
tiecorrect = Dispatch ( (ltiecorrect, (ListType, TupleType)),
                        (atiecorrect, (N.ndarray,)) )
ranksums = Dispatch ( (lranksums, (ListType, TupleType)),
(aranksums, (N.ndarray,)) )
wilcoxont = Dispatch ( (lwilcoxont, (ListType, TupleType)),
(awilcoxont, (N.ndarray,)) )
kruskalwallish = Dispatch ( (lkruskalwallish, (ListType, TupleType)),
(akruskalwallish, (N.ndarray,)) )
friedmanchisquare = Dispatch ( (lfriedmanchisquare, (ListType, TupleType)),
(afriedmanchisquare, (N.ndarray,)) )
## PROBABILITY CALCS:
chisqprob = Dispatch ( (lchisqprob, (IntType, FloatType)),
(achisqprob, (N.ndarray,)) )
zprob = Dispatch ( (lzprob, (IntType, FloatType)),
(azprob, (N.ndarray,)) )
ksprob = Dispatch ( (lksprob, (IntType, FloatType)),
(aksprob, (N.ndarray,)) )
fprob = Dispatch ( (lfprob, (IntType, FloatType)),
(afprob, (N.ndarray,)) )
betacf = Dispatch ( (lbetacf, (IntType, FloatType)),
(abetacf, (N.ndarray,)) )
betai = Dispatch ( (lbetai, (IntType, FloatType)),
(abetai, (N.ndarray,)) )
erfcc = Dispatch ( (lerfcc, (IntType, FloatType)),
(aerfcc, (N.ndarray,)) )
gammln = Dispatch ( (lgammln, (IntType, FloatType)),
(agammln, (N.ndarray,)) )
## ANOVA FUNCTIONS:
F_oneway = Dispatch ( (lF_oneway, (ListType, TupleType)),
(aF_oneway, (N.ndarray,)) )
F_value = Dispatch ( (lF_value, (ListType, TupleType)),
(aF_value, (N.ndarray,)) )
## SUPPORT FUNCTIONS:
incr = Dispatch ( (lincr, (ListType, TupleType, N.ndarray)), )
sum = Dispatch ( (lsum, (ListType, TupleType)),
(asum, (N.ndarray,)) )
cumsum = Dispatch ( (lcumsum, (ListType, TupleType)),
(acumsum, (N.ndarray,)) )
ss = Dispatch ( (lss, (ListType, TupleType)),
(ass, (N.ndarray,)) )
summult = Dispatch ( (lsummult, (ListType, TupleType)),
(asummult, (N.ndarray,)) )
square_of_sums = Dispatch ( (lsquare_of_sums, (ListType, TupleType)),
(asquare_of_sums, (N.ndarray,)) )
sumdiffsquared = Dispatch ( (lsumdiffsquared, (ListType, TupleType)),
(asumdiffsquared, (N.ndarray,)) )
shellsort = Dispatch ( (lshellsort, (ListType, TupleType)),
(ashellsort, (N.ndarray,)) )
rankdata = Dispatch ( (lrankdata, (ListType, TupleType)),
(arankdata, (N.ndarray,)) )
findwithin = Dispatch ( (lfindwithin, (ListType, TupleType)),
(afindwithin, (N.ndarray,)) )
###################### END OF NUMERIC FUNCTION BLOCK #####################
###################### END OF STATISTICAL FUNCTIONS ######################
except ImportError:
pass
| {
"content_hash": "86f7a52fb266bb30b00ca468cfb0074b",
"timestamp": "",
"source": "github",
"line_count": 4497,
"max_line_length": 177,
"avg_line_length": 34.68957082499444,
"alnum_prop": 0.5941320136667543,
"repo_name": "sniemi/SamPy",
"id": "2481dc678a6f8be88a2071b86e2f499cf79a9f23",
"size": "157083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandbox/src1/stats.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "296"
},
{
"name": "C",
"bytes": "68436"
},
{
"name": "C++",
"bytes": "45956"
},
{
"name": "CSS",
"bytes": "35570"
},
{
"name": "Fortran",
"bytes": "45191"
},
{
"name": "HTML",
"bytes": "107435"
},
{
"name": "IDL",
"bytes": "13651"
},
{
"name": "JavaScript",
"bytes": "25435"
},
{
"name": "Makefile",
"bytes": "26035"
},
{
"name": "Matlab",
"bytes": "1508"
},
{
"name": "Perl",
"bytes": "59198"
},
{
"name": "PostScript",
"bytes": "1403536"
},
{
"name": "Prolog",
"bytes": "16061"
},
{
"name": "Python",
"bytes": "5763358"
},
{
"name": "R",
"bytes": "208346"
},
{
"name": "Rebol",
"bytes": "161"
},
{
"name": "Roff",
"bytes": "73616"
},
{
"name": "Ruby",
"bytes": "2032"
},
{
"name": "Shell",
"bytes": "41512"
},
{
"name": "Tcl",
"bytes": "44150"
},
{
"name": "TeX",
"bytes": "107783"
}
],
"symlink_target": ""
} |
import os
import tempfile
import psycopg2
import json
from hashlib import md5
from copy import deepcopy
from collections import defaultdict
from wextractor.loaders.loader import Loader
class PostgresLoader(Loader):
    """Loader that writes extracted rows into a PostgreSQL database.

    Driven by a user-supplied relational ``schema`` (a list of table
    descriptions -- see ``transform_to_schema`` for the expected shape).
    ``load`` transforms flat input rows into per-table rows keyed by an md5
    content hash, deduplicates exact matches, bulk-loads each table with
    COPY from a temp file, and finally adds the declared foreign keys.

    NOTE(review): this module is Python 2 code (``unicode``, ``print``
    statement, ``except ..., e`` syntax, ``dict.iteritems``).
    """

    def __init__(self, connection_params, schema=None):
        # Validate the schema eagerly so misconfiguration fails at
        # construction time rather than mid-load.
        super(PostgresLoader, self).__init__(connection_params, schema)
        if self.schema is None:
            self.schema = []

        for table_schema in self.schema:
            # Only the first column tuple is inspected; later tuples are
            # assumed to have the same (name, dtype) shape.
            if table_schema.get('columns', None) is None:
                raise Exception('Tables must contain columns')
            elif not isinstance(table_schema['columns'][0], tuple):
                raise Exception('Table columns must be tuples')
            elif len(table_schema['columns'][0]) == 1:
                raise Exception('Column Types are not specified')

    def connect(self):
        '''
        The connect method implements the logic behind
        the psycopg2 connection. Try/catch/finally
        logic should be implemented outside this method
        to ensure that the database connection always
        closes when appropriate.

        For PostgresLoader, the connection_params
        must include at least a database and user.
        It can also optionally include a hostname, port,
        and password
        '''
        database = self.connection_params.get('database', None)
        user = self.connection_params.get('user', None)
        if not database or not user:
            raise Exception('PostgresLoader must contain "database" and "user" keys')
        # All connection_params keys are forwarded to psycopg2 verbatim.
        conn = psycopg2.connect(**self.connection_params)
        return conn

    def generate_drop_table_query(self, table_schema):
        '''
        Generates a cascading drop table query that will drop
        the table and its relations
        '''
        drop_query = '''DROP TABLE IF EXISTS {table} CASCADE'''.format(
            table=table_schema['table_name']
        )
        return drop_query

    def generate_create_table_query(self, table_schema):
        '''
        Generates a create table query and raises exceptions
        if the table schema generation is malformed
        '''
        # NOTE(review): if the first column tuple is not a 2-tuple, the
        # ``coldefs`` name below is never bound and the format() call
        # raises UnboundLocalError instead of a descriptive exception.
        if len(table_schema['columns'][0]) == 2:
            # row_id is a synthetic serial column filled by
            # generate_data_tempfile's line counter.
            coldefs = 'row_id SERIAL,'
            coldefs += ','.join(
                '{name} {dtype}'.format(name=name, dtype=dtype) for name, dtype in table_schema['columns']
            )
        create_query = '''CREATE TABLE IF NOT EXISTS {table} ({coldefs}, PRIMARY KEY({pkey}))'''.format(
            table=table_schema['table_name'],
            coldefs=coldefs,
            pkey=table_schema['pkey']
        )
        return create_query

    def generate_foreign_key_query(self, table, i=0):
        '''
        Generates alter table statements that add formal
        foreign key relationships. Takes in a schema and
        an optional index (defaults to 0) of the positon
        of the relationship in the schema

        NOTE: This must be called AFTER data is already
        loaded. Otherwise, a psycopg2 error will be thrown.
        '''
        # Returns None (no query) when the table declares no relations.
        if table.get('from_relations', None) is None:
            return
        return '''ALTER TABLE {table} ADD FOREIGN KEY ({id}) REFERENCES {relationship}'''.format(
            table=table['table_name'],
            id=table['from_relations'][i] + '_id',
            relationship=table['from_relations'][i]
        )

    def null_replace(self, field):
        '''
        Replaces empty string, None with 'NULL' for Postgres loading

        'NULL' matches the ``null='NULL'`` marker passed to copy_from in
        load(), so these values become SQL NULLs on import.
        '''
        if type(field) in [str, unicode]:
            if field == '':
                return 'NULL'
        elif field is None:
            return 'NULL'
        return field

    def hash_row(self, row):
        '''
        Return an md5 hash of a row's contents (minus its index). This hash
        will turn into the table's new primary key.

        md5 is used purely as a content fingerprint here, not for security.
        '''
        return md5(json.dumps(row, sort_keys=True)).hexdigest()

    def simple_dedupe(self, idx, table):
        '''
        Takes in a table that has been transformed by the
        transform_to_schema method but not been deduplicated.
        This method simply attempts to determine if the row
        is an exact replica (minus fkeys). If it is, it checks to
        make sure the fkeys are the same and handles the event that
        the relationships are to different places and thus should
        be different rows (by modifying the primary key). Returns
        a deduplicated list.
        '''
        # checker maps the row's data (as a tuple of items) to the sets of
        # primary/foreign keys seen for that exact data.
        checker, output, pkey, fkey = {}, [], {}, {}
        pkey_name = self.schema[idx]['table_name'] + '_id'
        fkeys_name = [i + '_id' for i in self.schema[idx].get('from_relations', [])]
        for row in table:
            # store the value of the primary key
            pkey[pkey_name] = row.pop(pkey_name, None)
            for key in fkeys_name:
                # store the values of the foreign keys
                fkey[key] = row.pop(key, None)
            row_as_tuple = tuple(row.items())
            # Use try/except because it's faster than checking if
            # the key is in the dictionary keys
            try:
                row_with_fkey = dict(row.items() + fkey.items())
                checker[row_as_tuple]['pkey'].add(
                    (pkey_name, self.hash_row(row_with_fkey))
                )
                checker[row_as_tuple]['fkey'].add(tuple(fkey.items()))
            except KeyError:
                # make a deep copy because calling .pop() will update
                # every single one of these otherwise
                fkey_copy = deepcopy(fkey)
                # create a tuple of tuples for proper extraction later
                pkey_tuple = ((pkey_name, self.hash_row(row)),)
                # NOTE(review): set(tuple(fkey_copy.items())) seeds the set
                # with the individual (key, value) pairs, while the add()
                # above inserts the whole tuple-of-pairs as one element --
                # these look inconsistent; confirm intended behaviour.
                checker[row_as_tuple] = {
                    'pkey': set(pkey_tuple),
                    'fkey': set(tuple(fkey_copy.items()))
                }
        # We should now have deduplicated everything, so we just
        # reshape the checker dictionary into the list of dict format
        # that the remainder should expect
        for checker_row, table_keys in checker.items():
            if len(table_keys['fkey']) == 0 or len(fkeys_name) == 0:
                # No relations at all: data + pkey is the whole row.
                final_output = dict(checker_row)
                final_output.update(dict(table_keys['pkey']))
            elif len(table_keys['pkey']) == 1 and len(table_keys['fkey']) == 1:
                # Exactly one pkey/fkey combination: emit it as-is.
                final_output = dict(checker_row)
                final_output.update(dict(table_keys['pkey']))
                final_output.update(dict(table_keys['fkey']))
            elif len(table_keys['pkey']) == len(table_keys['fkey']):
                # Same data linked to several places: mint a distinct pkey
                # per foreign-key target so each relation keeps its own row.
                # NOTE(review): only the final fkey's row survives; earlier
                # iterations overwrite final_output before the single
                # append below -- confirm whether each fkey should append.
                for fkey in table_keys['fkey']:
                    new_pkey = self.hash_row(checker_row + fkey)
                    final_output = dict(checker_row)
                    final_output.update({pkey_name: new_pkey})
                    try:
                        final_output.update(dict((fkey,)))
                    except:
                        final_output.update(dict(fkey))
            else:
                # TODO: Implement something to handle this
                raise Exception('pkey/fkey mismatch.')
            output.append(final_output)
        return output

    def transform_to_schema(self, data, add_pkey):
        '''
        Schema for postgres must take the following form:
        [
            {
                'table_name': '',
                'pkey': '',
                'index': '',
                'to_relations': ['table_name', ...],
                'from_relations': ['table_name', ...],
                'columns': ( ('col_name', col_type), ... ),
            }, ...
        ]

        Input data will come in as a list of dictionaries, with
        the keys being the column names and the values being the
        values. The transformed data will return a list of
        dictionaries where each dictionary is a table to write
        to the final data store.

        Additionally, this method holds a dictionary of like items
        and their ids to allow for very simple deduplication.
        '''
        # start by generating the output list of lists
        output = [list() for i in range(len(self.schema))]
        # NOTE(review): ``holder`` and ``deduper`` are never used below.
        holder = [defaultdict(list) for i in range(len(self.schema))]
        # initialize a dictionary to hold potential duplicates
        deduper = {}
        for ix, line in enumerate(data):
            for table_idx, table in enumerate(self.schema):
                col_names = zip(*table['columns'])[0]
                # initialize the new row to add to the final loaded data
                new_row = dict()
                for cell in line.iteritems():
                    if cell[0] in col_names:
                        # extend the new row with the value of the cell
                        new_row[cell[0]] = str(self.null_replace(cell[1]))
                    else:
                        continue
                # Content hash of the row's data becomes its id.
                row_id = self.hash_row(new_row)
                if add_pkey or table.get('pkey', None) is None:
                    new_row[table['table_name'] + '_id'] = row_id
                else:
                    new_row[table['pkey']] = row_id
                # once we have added all of the data fields, add the relationships
                for relationship in table.get('to_relations', []):
                    # find the index of the matching relationship table
                    # NOTE(review): this indexes output[rel_index][ix], which
                    # assumes the related table appears EARLIER in self.schema
                    # (so its row for input line ix already exists) --
                    # otherwise this raises IndexError. Confirm the schema
                    # ordering contract.
                    rel_index = next(index for (index, d) in enumerate(self.schema) if d['table_name'] == relationship)
                    output[rel_index][ix][self.schema[table_idx]['table_name'] + '_id'] = row_id
                output[table_idx].extend([new_row])
        final_output = []
        for table_ix, table in enumerate(output):
            final_output.append(self.simple_dedupe(table_ix, table))
        return final_output

    def generate_data_tempfile(self, data):
        '''
        Takes in a list and generates a temporary tab-separated
        file. This file can then be consumed by the Postgres \COPY
        function

        Returns (file, column_names); column_names is None for empty input.
        '''
        tmp_file = tempfile.TemporaryFile(dir=os.getcwd())
        if len(data) == 0:
            return tmp_file, None
        n = 0
        for row in data:
            # Sort items by key so value order matches the sorted column
            # name list returned below.
            row = sorted(row.items())
            n += 1
            if n % 10000 == 0:
                print 'Wrote {n} lines'.format(n=n)
            # The leading counter fills the synthetic row_id column.
            rowstr = '\t'.join([str(n)] + [i[1] for i in row]) + '\n'
            tmp_file.write(rowstr)
        # Rewind so copy_from can read from the start.
        tmp_file.seek(0)
        return tmp_file, ['row_id'] + sorted(data[0].keys())

    def load(self, data, add_pkey=True):
        '''
        Main method for final Postgres loading.

        Takes in data and a flag for adding a primary key and
        transforms the input data to the proper schema, generates
        relationships, does simple deduplcation on exact matches,
        writes a tempfile with all of the data, boots up a
        connection to Postgres, and loads everything in

        NOTE(review): mutates self.schema in place (prepends the id column,
        sets pkey, appends fkey columns), so calling load() twice on the
        same instance would duplicate those columns -- confirm single-use
        assumption.
        '''
        conn = None
        try:
            conn = self.connect()
            cursor = conn.cursor()
            if not self.schema:
                raise Exception('Schemaless loading is not supported by PostgresLoader')
            tables = self.transform_to_schema(data, add_pkey)
            for ix, table in enumerate(self.schema):
                # Prepend the content-hash id column to every table.
                table['columns'] = ( (table['table_name'] + '_id', 'VARCHAR(32)'), ) + table['columns']
                if add_pkey or table.get('pkey', None) is None:
                    table['pkey'] = table['table_name'] + '_id'
                if table.get('from_relations', None):
                    for relationship in table['from_relations']:
                        table['columns'] += ( ( relationship + '_id', 'VARCHAR(32)' ), )
                drop_table = self.generate_drop_table_query(table)
                cursor.execute(drop_table)
                create_table = self.generate_create_table_query(table)
                cursor.execute(create_table)
                tmp_file, column_names = self.generate_data_tempfile(tables[ix])
                cursor.copy_from(tmp_file, table['table_name'], null='NULL', sep='\t', columns=column_names)
            # Foreign keys are added only after all data is loaded (see
            # generate_foreign_key_query's note).
            for table in self.schema:
                for ix, relationship in enumerate(table.get('from_relations', [])):
                    fk_query = self.generate_foreign_key_query(table, ix)
                    cursor.execute(fk_query)
            conn.commit()
        except psycopg2.Error, e:
            # Roll back the partial load before re-raising.
            if conn:
                conn.rollback()
            raise e
        finally:
            if conn:
                conn.close()
| {
"content_hash": "89e5f3486fb125361c999f394165043d",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 119,
"avg_line_length": 36.40922190201729,
"alnum_prop": 0.550023745448789,
"repo_name": "codeforamerica/w-drive-extractor",
"id": "8fd7ec1209396c20586695138d3481d40a0b720f",
"size": "12657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wextractor/loaders/postgres.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40190"
}
],
"symlink_target": ""
} |
import logging
import logging.config
import os
class NodepoolApp(object):
    """Base class for nodepool command-line applications.

    Subclasses populate ``self.args`` (an argparse-style namespace) before
    calling :meth:`setup_logging`.
    """

    def __init__(self):
        # Parsed command-line arguments; set by the subclass's arg parser.
        self.args = None

    def setup_logging(self):
        """Configure logging from ``self.args.logconfig`` or fall back to
        DEBUG-level basicConfig.

        Raises:
            Exception: if the configured logging config file does not exist.
        """
        if self.args.logconfig:
            fp = os.path.expanduser(self.args.logconfig)
            if not os.path.exists(fp):
                raise Exception("Unable to read logging config file at %s" %
                                fp)
            logging.config.fileConfig(fp)
            # Use lazy %-style args so the message is only formatted when
            # the record is actually emitted.
            logging.info('Logging configured from file %s', fp)
        else:
            logging.basicConfig(level=logging.DEBUG,
                                format='%(asctime)s %(levelname)s %(name)s: '
                                       '%(message)s')
| {
"content_hash": "a7285d3e348b06f65df18db59e8ee30d",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 77,
"avg_line_length": 31.954545454545453,
"alnum_prop": 0.519203413940256,
"repo_name": "seandst/nodepool",
"id": "b19a4b9c77e42f88655af3a7689ae2ca1f00a6b9",
"size": "1370",
"binary": false,
"copies": "1",
"ref": "refs/heads/pulp",
"path": "nodepool/cmd/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "374188"
},
{
"name": "Shell",
"bytes": "12644"
}
],
"symlink_target": ""
} |
"""Utilities for the Nexmark suite.
The Nexmark suite is a series of queries (streaming pipelines) performed
on a simulation of auction events. This util includes:
- A Command class used to terminate the streaming jobs
launched in nexmark_launcher.py by the DirectRunner.
- A ParseEventFn DoFn to parse events received from PubSub.
Usage:
To run a process for a certain duration, define in the code:
command = Command(process_to_terminate, args)
command.run(timeout=duration)
"""
# pytype: skip-file
from __future__ import absolute_import
from __future__ import print_function
import logging
import threading
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
_LOGGER = logging.getLogger(__name__)
class Command(object):
    """Run a callable on a daemon thread for at most ``timeout`` seconds.

    Used to bound the runtime of streaming pipelines launched by the
    DirectRunner: ``run`` returns once the command finishes or the timeout
    elapses, whichever comes first (the daemon thread is simply abandoned
    on timeout).
    """

    def __init__(self, cmd, args):
        """Store the callable *cmd* and the positional *args* for it."""
        self.cmd = cmd
        self.args = args

    def run(self, timeout):
        """Execute ``self.cmd(*self.args)`` on a daemon thread, joining for
        at most *timeout* seconds."""
        def thread_target():
            # Use the module logger consistently; the original bare
            # logging.debug() bypassed _LOGGER and went through the root
            # logger, losing the module name.
            _LOGGER.debug(
                'Starting thread for %d seconds: %s', timeout, self.cmd.__name__)

            self.cmd(*self.args)
            _LOGGER.info(
                '%d seconds elapsed. Thread (%s) finished.',
                timeout,
                self.cmd.__name__)

        # Daemon thread: if the command outlives the timeout it will not
        # block interpreter shutdown.
        thread = threading.Thread(target=thread_target, name='Thread-timeout')
        thread.daemon = True
        thread.start()
        thread.join(timeout)
class ParseEventFn(beam.DoFn):
  """Parses the raw event info into a Python objects.

  Each event line has the following format:

    person: <id starting with 'p'>,name,email,credit_card,city, \
      state,timestamp,extra
    auction: <id starting with 'a'>,item_name, description,initial_bid, \
      reserve_price,timestamp,expires,seller,category,extra
    bid: <auction starting with 'b'>,bidder,price,timestamp,extra

  For example:

    'p12345,maria,maria@maria.com,1234-5678-9012-3456, \
      sunnyvale,CA,1528098831536'
    'a12345,car67,2012 hyundai elantra,15000,20000, \
      1528098831536,20180630,maria,vehicle'
    'b12345,maria,20000,1528098831536'
  """

  # Dispatch table from the event-id prefix to the model class; built once
  # at class-definition time instead of on every element.
  MODEL_DISPATCH = {
      'p': nexmark_model.Person,
      'a': nexmark_model.Auction,
      'b': nexmark_model.Bid,
  }

  def process(self, elem):
    """Yield one parsed model instance for the raw CSV line *elem*.

    Raises:
      ValueError: if the line is empty or does not start with a known
        prefix (previously an empty line raised a bare IndexError).
    """
    row = elem.split(',')
    model = self.MODEL_DISPATCH.get(elem[0]) if elem else None
    if not model:
      raise ValueError('Invalid event: %s.' % row)

    event = model(*row)
    # Use the module logger (_LOGGER) rather than the root logger.
    _LOGGER.debug('Parsed event: %s', event)
    yield event
def display(elm):
  """Log *elm* at DEBUG level and return it unchanged.

  Useful as a pass-through peek step in a pipeline.
  """
  # Use the module logger (_LOGGER) rather than the root logger that the
  # bare logging.debug() call wrote to.
  _LOGGER.debug(elm)
  return elm
| {
"content_hash": "e6bcd87dc1b31712a7e0a6baf5c9d03a",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 79,
"avg_line_length": 27.725274725274726,
"alnum_prop": 0.6492271105826397,
"repo_name": "iemejia/incubator-beam",
"id": "9079596a1a2ae7ce9f0caf52e78897f9a7c8fb4f",
"size": "3308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
# Ansible module docs (parsed by ansible-doc). The module name must match
# the file name (maas_boot_resource.py); the old text said
# "maas_boot_resources" and described the state option as being for an
# sshkey -- both copy-paste errors.
DOCUMENTATION = '''
---
module: maas_boot_resource
short_description: Manage MAAS boot resources
options:
  maas:
    description:
      - URL of MAAS server
    default: http://localhost/MAAS/api/1.0/
  key:
    description:
      - MAAS API key
    required: yes
  state:
    description:
      - desired operation for the boot resources
    choices: ['query', 'import']
    default: query
requirements: [ipaddress, requests_oauthlib, maasclient]
author: David Bainbridge
'''

EXAMPLES = '''
examples:
    maas_boot_resource:
      maas: http://my.maas.server.com/MAAS/api/1.0/
      key: 'xBvr9dx5k7S52myufC:fqBXV7hJgXegNZDw9c:K8hsmL47XjAppfQy2pDVW7G49p6PELgp'
      state: query
'''
import sys
import json
import ipaddress
import requests
from maasclient.auth import MaasAuth
from maasclient import MaasClient
# For some reason the maasclient doesn't provide a put method. So
# we will add it here
def put(client, url, params=None):
    """Issue an HTTP PUT to the MAAS API via *client*.

    Builds the full URL from the client's configured API base
    (``client.auth.api_url``), signs the request with the client's OAuth
    credentials (``client._oauth()``), and sends *params* as the request
    body. Returns the ``requests`` Response object.
    """
    return requests.put(url=client.auth.api_url + url,
                        auth=client._oauth(), data=params)
# Attempt to interpret the given value as a JSON object, if that fails
# just return it as a string
def string_or_object(val):
    """Parse *val* as JSON if possible, otherwise return it unchanged.

    Used to surface API error bodies: structured responses become Python
    objects, anything else is passed through as-is.
    """
    try:
        return json.loads(val)
    except (TypeError, ValueError):
        # Not valid JSON (ValueError covers json decode errors; TypeError
        # covers non-string input). The original bare ``except:`` also
        # swallowed KeyboardInterrupt/SystemExit.
        return val
# Return a copy of the given dictionary with any `null` valued entries
# removed
def remove_null(d_in):
    """Return a copy of *d_in* with all None-valued entries removed.

    The input dictionary is left unmodified.
    """
    # Dict comprehension replaces the copy/collect/delete three-pass
    # original; ``is not None`` is the idiomatic None test.
    return {k: v for k, v in d_in.items() if v is not None}
def filter(filter_type, d, keys):
    """Mutate dict *d* in place, keeping or dropping the listed *keys*.

    With ``filter_type == 'include'`` only the listed keys survive; any
    other value is treated as 'exclude' and the listed keys are removed.

    NOTE: intentionally shadows the builtin ``filter`` to preserve this
    module's existing call sites.
    """
    if filter_type == 'include':
        keep = lambda k: k in keys
    else:
        keep = lambda k: k not in keys
    # Iterate over a snapshot of the keys: mutating a dict while iterating
    # its live key view raises RuntimeError on Python 3.
    for k in list(d.keys()):
        if not keep(k):
            d.pop(k, None)
def main():
    """Ansible entry point: query or import MAAS boot resources."""
    module = AnsibleModule(
        argument_spec=dict(
            maas=dict(default='http://localhost/MAAS/api/1.0/'),
            key=dict(required=True),
            state=dict(default='query', choices=['query', 'import'])
        ),
        supports_check_mode=False
    )

    params = module.params
    state = params['state']

    # Authenticate into MAAS with the supplied API key.
    client = MaasClient(MaasAuth(params['maas'], params['key']))

    if state == 'query':
        response = client.get('/boot-resources/')
        if response.ok:
            module.exit_json(changed=False, resources=json.loads(response.text))
        else:
            module.fail_json(msg=string_or_object(response.text))
    elif state == 'import':
        response = client.post('/boot-resources/', dict(op='import'))
        if response.ok:
            module.exit_json(changed=True)
        else:
            module.fail_json(msg=string_or_object(response.text))
    else:
        module.fail_json(msg='unknown state')
| {
"content_hash": "39c187770160f64fbc916e420fea1c80",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 81,
"avg_line_length": 26.09009009009009,
"alnum_prop": 0.6104972375690608,
"repo_name": "opencord/maas",
"id": "4abec69577f9fc683c5c092e6dd396719960f109",
"size": "3511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "library/maas_boot_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "129747"
},
{
"name": "Makefile",
"bytes": "4742"
},
{
"name": "Python",
"bytes": "80511"
},
{
"name": "Shell",
"bytes": "36702"
},
{
"name": "Smarty",
"bytes": "1073"
}
],
"symlink_target": ""
} |
"""
Mercadopago API exceptions.
"""
class NoAccessTokenError(Exception):
    """Error carrying the offending *value* when no access token is set."""

    def __init__(self, value):
        # Forward to Exception.__init__ so ``self.args`` is populated;
        # without this the exception cannot be pickled (unpickling would
        # call the constructor with no arguments).
        super(NoAccessTokenError, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class UndefinedResponseError(Exception):
    """Error carrying the offending *value* for an unrecognized API response."""

    def __init__(self, value):
        # Forward to Exception.__init__ so ``self.args`` is populated;
        # without this the exception cannot be pickled.
        super(UndefinedResponseError, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class EmptyCredentialsError(Exception):
    """Error carrying the offending *value* when credentials are missing/empty."""

    def __init__(self, value):
        # Forward to Exception.__init__ so ``self.args`` is populated;
        # without this the exception cannot be pickled.
        super(EmptyCredentialsError, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
class ExpiredAuthenticationError(Exception):
    """Error carrying the offending *value* when authentication has expired."""

    def __init__(self, value):
        # Forward to Exception.__init__ so ``self.args`` is populated;
        # without this the exception cannot be pickled.
        super(ExpiredAuthenticationError, self).__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
| {
"content_hash": "22494d05de4a2d5fc33d4fabffc14dfa",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 44,
"avg_line_length": 17.526315789473685,
"alnum_prop": 0.6051051051051051,
"repo_name": "angvp/pymercadopago",
"id": "7811c90c08ac7af7df13ccc821bd51779eb42743",
"size": "690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymercadopago/mpexceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14098"
}
],
"symlink_target": ""
} |
"""check for new / old style related problems
"""
from logilab import astng
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
MSGS = {
'E1001': ('Use __slots__ on an old style class',
'Used when an old style class use the __slots__ attribute.'),
'E1002': ('Use super on an old style class',
'Used when an old style class use the super builtin.'),
'E1003': ('Bad first argument %r given to super class',
'Used when another argument than the current class is given as \
first argument of the super builtin.'),
'W1001': ('Use of "property" on an old style class',
'Used when PyLint detect the use of the builtin "property" \
on an old style class while this is relying on new style \
classes features'),
}
class NewStyleConflictChecker(BaseChecker):
    """checks for usage of new style capabilities on old style classes and
    other new/old styles conflicts problems
    * use of property, __slots__, super
    * "super" usage
    """

    __implements__ = (IASTNGChecker,)

    # configuration section name
    name = 'newstyle'
    # messages
    msgs = MSGS
    priority = -2
    # configuration options
    options = ()

    def visit_class(self, node):
        """Flag E1001 when an old-style class body defines __slots__
        (which only has an effect on new-style classes).
        """
        if '__slots__' in node and not node.newstyle:
            self.add_message('E1001', node=node)

    def visit_callfunc(self, node):
        """Flag W1001 when property() is called inside an old-style class
        body (property descriptors rely on new-style class semantics).
        """
        parent = node.parent.frame()
        if (isinstance(parent, astng.Class) and
            not parent.newstyle and
            isinstance(node.func, astng.Name)):
            # Only the bare builtin name is matched; an aliased or
            # attribute access to property is not detected here.
            name = node.func.name
            if name == 'property':
                self.add_message('W1001', node=node)

    def visit_function(self, node):
        """Check super() usage inside methods: E1002 on old-style classes,
        E1003 when the first argument to super is not the current class.
        """
        # ignore actual functions or method within a new style class
        if not node.is_method():
            return
        klass = node.parent.frame()
        for stmt in node.nodes_of_class(astng.CallFunc):
            expr = stmt.func
            if not isinstance(expr, astng.Getattr):
                continue
            call = expr.expr
            # skip the test if using super
            if isinstance(call, astng.CallFunc) and \
               isinstance(call.func, astng.Name) and \
               call.func.name == 'super':
                if not klass.newstyle:
                    # super should not be used on an old style class
                    self.add_message('E1002', node=node)
                else:
                    # super first arg should be the class
                    # NOTE: .infer().next() is the Python 2 iterator
                    # protocol (next(...) in Python 3).
                    try:
                        supcls = (call.args and call.args[0].infer().next()
                                  or None)
                    except astng.InferenceError:
                        continue
                    if klass is not supcls:
                        # Report the inferred class name when available.
                        supcls = getattr(supcls, 'name', supcls)
                        self.add_message('E1003', node=node, args=supcls)
def register(linter):
    """Required pylint plugin entry point: register this checker with *linter*."""
    linter.register_checker(NewStyleConflictChecker(linter))
| {
"content_hash": "3e1bae3048a9fbac3cd85a79ca9fd3d4",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 79,
"avg_line_length": 37.83695652173913,
"alnum_prop": 0.5360528583740305,
"repo_name": "dbbhattacharya/kitsune",
"id": "16d17c0ffd9dfc950d1e1a8e2f55d48c52b31336",
"size": "4292",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "vendor/packages/pylint/checkers/newstyle.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "2694"
},
{
"name": "CSS",
"bytes": "276585"
},
{
"name": "HTML",
"bytes": "600145"
},
{
"name": "JavaScript",
"bytes": "800276"
},
{
"name": "Python",
"bytes": "2762831"
},
{
"name": "Shell",
"bytes": "6720"
},
{
"name": "Smarty",
"bytes": "1752"
}
],
"symlink_target": ""
} |
"""
Support for Buienradar.nl weather service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.buienradar/
"""
import asyncio
from datetime import datetime, timedelta
import logging
import async_timeout
import aiohttp
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_LATITUDE, CONF_LONGITUDE,
CONF_MONITORED_CONDITIONS, CONF_NAME, TEMP_CELSIUS)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (
async_track_point_in_utc_time)
from homeassistant.util import dt as dt_util
REQUIREMENTS = ['buienradar==0.91']
_LOGGER = logging.getLogger(__name__)
MEASURED_LABEL = 'Measured'
TIMEFRAME_LABEL = 'Timeframe'
SYMBOL = 'symbol'
# Schedule next call after (minutes):
SCHEDULE_OK = 10
# When an error occurred, new call after (minutes):
SCHEDULE_NOK = 2
# Supported sensor types:
# Key: ['label', unit, icon]
SENSOR_TYPES = {
'stationname': ['Stationname', None, None],
'condition': ['Condition', None, None],
'conditioncode': ['Condition code', None, None],
'conditiondetailed': ['Detailed condition', None, None],
'conditionexact': ['Full condition', None, None],
'symbol': ['Symbol', None, None],
'humidity': ['Humidity', '%', 'mdi:water-percent'],
'temperature': ['Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'groundtemperature': ['Ground temperature', TEMP_CELSIUS,
'mdi:thermometer'],
'windspeed': ['Wind speed', 'm/s', 'mdi:weather-windy'],
'windforce': ['Wind force', 'Bft', 'mdi:weather-windy'],
'winddirection': ['Wind direction', None, 'mdi:compass-outline'],
'windazimuth': ['Wind direction azimuth', '°', 'mdi:compass-outline'],
'pressure': ['Pressure', 'hPa', 'mdi:gauge'],
'visibility': ['Visibility', 'm', None],
'windgust': ['Wind gust', 'm/s', 'mdi:weather-windy'],
'precipitation': ['Precipitation', 'mm/h', 'mdi:weather-pouring'],
'irradiance': ['Irradiance', 'W/m2', 'mdi:sunglasses'],
'precipitation_forecast_average': ['Precipitation forecast average',
'mm/h', 'mdi:weather-pouring'],
'precipitation_forecast_total': ['Precipitation forecast total',
'mm', 'mdi:weather-pouring'],
'temperature_1d': ['Temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_2d': ['Temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_3d': ['Temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_4d': ['Temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'temperature_5d': ['Temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_1d': ['Minimum temperature 1d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_2d': ['Minimum temperature 2d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_3d': ['Minimum temperature 3d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_4d': ['Minimum temperature 4d', TEMP_CELSIUS, 'mdi:thermometer'],
'mintemp_5d': ['Minimum temperature 5d', TEMP_CELSIUS, 'mdi:thermometer'],
'rain_1d': ['Rain 1d', 'mm', 'mdi:weather-pouring'],
'rain_2d': ['Rain 2d', 'mm', 'mdi:weather-pouring'],
'rain_3d': ['Rain 3d', 'mm', 'mdi:weather-pouring'],
'rain_4d': ['Rain 4d', 'mm', 'mdi:weather-pouring'],
'rain_5d': ['Rain 5d', 'mm', 'mdi:weather-pouring'],
'snow_1d': ['Snow 1d', 'cm', 'mdi:snowflake'],
'snow_2d': ['Snow 2d', 'cm', 'mdi:snowflake'],
'snow_3d': ['Snow 3d', 'cm', 'mdi:snowflake'],
'snow_4d': ['Snow 4d', 'cm', 'mdi:snowflake'],
'snow_5d': ['Snow 5d', 'cm', 'mdi:snowflake'],
'rainchance_1d': ['Rainchance 1d', '%', 'mdi:weather-pouring'],
'rainchance_2d': ['Rainchance 2d', '%', 'mdi:weather-pouring'],
'rainchance_3d': ['Rainchance 3d', '%', 'mdi:weather-pouring'],
'rainchance_4d': ['Rainchance 4d', '%', 'mdi:weather-pouring'],
'rainchance_5d': ['Rainchance 5d', '%', 'mdi:weather-pouring'],
'sunchance_1d': ['Sunchance 1d', '%', 'mdi:weather-partlycloudy'],
'sunchance_2d': ['Sunchance 2d', '%', 'mdi:weather-partlycloudy'],
'sunchance_3d': ['Sunchance 3d', '%', 'mdi:weather-partlycloudy'],
'sunchance_4d': ['Sunchance 4d', '%', 'mdi:weather-partlycloudy'],
'sunchance_5d': ['Sunchance 5d', '%', 'mdi:weather-partlycloudy'],
'windforce_1d': ['Wind force 1d', 'Bft', 'mdi:weather-windy'],
'windforce_2d': ['Wind force 2d', 'Bft', 'mdi:weather-windy'],
'windforce_3d': ['Wind force 3d', 'Bft', 'mdi:weather-windy'],
'windforce_4d': ['Wind force 4d', 'Bft', 'mdi:weather-windy'],
'windforce_5d': ['Wind force 5d', 'Bft', 'mdi:weather-windy'],
'condition_1d': ['Condition 1d', None, None],
'condition_2d': ['Condition 2d', None, None],
'condition_3d': ['Condition 3d', None, None],
'condition_4d': ['Condition 4d', None, None],
'condition_5d': ['Condition 5d', None, None],
'conditioncode_1d': ['Condition code 1d', None, None],
'conditioncode_2d': ['Condition code 2d', None, None],
'conditioncode_3d': ['Condition code 3d', None, None],
'conditioncode_4d': ['Condition code 4d', None, None],
'conditioncode_5d': ['Condition code 5d', None, None],
'conditiondetailed_1d': ['Detailed condition 1d', None, None],
'conditiondetailed_2d': ['Detailed condition 2d', None, None],
'conditiondetailed_3d': ['Detailed condition 3d', None, None],
'conditiondetailed_4d': ['Detailed condition 4d', None, None],
'conditiondetailed_5d': ['Detailed condition 5d', None, None],
'conditionexact_1d': ['Full condition 1d', None, None],
'conditionexact_2d': ['Full condition 2d', None, None],
'conditionexact_3d': ['Full condition 3d', None, None],
'conditionexact_4d': ['Full condition 4d', None, None],
'conditionexact_5d': ['Full condition 5d', None, None],
'symbol_1d': ['Symbol 1d', None, None],
'symbol_2d': ['Symbol 2d', None, None],
'symbol_3d': ['Symbol 3d', None, None],
'symbol_4d': ['Symbol 4d', None, None],
'symbol_5d': ['Symbol 5d', None, None],
}
CONF_TIMEFRAME = 'timeframe'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_CONDITIONS,
default=['symbol', 'temperature']): vol.All(
cv.ensure_list, vol.Length(min=1),
[vol.In(SENSOR_TYPES.keys())]),
vol.Inclusive(CONF_LATITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, 'coordinates',
'Latitude and longitude must exist together'): cv.longitude,
vol.Optional(CONF_TIMEFRAME, default=60):
vol.All(vol.Coerce(int), vol.Range(min=5, max=120)),
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_entities,
                         discovery_info=None):
    """Create the buienradar sensor."""
    from homeassistant.components.weather.buienradar import DEFAULT_TIMEFRAME

    # Fall back to the instance-wide coordinates when none are configured.
    latitude = config.get(CONF_LATITUDE, hass.config.latitude)
    longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
    if latitude is None or longitude is None:
        _LOGGER.error("Latitude or longitude not set in HomeAssistant config")
        return False

    timeframe = config.get(CONF_TIMEFRAME, DEFAULT_TIMEFRAME)
    coordinates = {
        CONF_LATITUDE: float(latitude),
        CONF_LONGITUDE: float(longitude),
    }
    _LOGGER.debug("Initializing buienradar sensor coordinate %s, timeframe %s",
                  coordinates, timeframe)

    entity_name = config.get(CONF_NAME, 'br')
    sensors = [
        BrSensor(sensor_type, entity_name, coordinates)
        for sensor_type in config[CONF_MONITORED_CONDITIONS]
    ]
    async_add_entities(sensors)

    # BrData holds a reference to the sensors and pushes data into them.
    monitor = BrData(hass, coordinates, timeframe, sensors)
    # schedule the first update in 1 minute from now:
    yield from monitor.schedule_update(1)
class BrSensor(Entity):
    """Representation of an Buienradar sensor.

    Data is pushed into the sensor via load_data() by BrData; the entity
    itself never polls (see should_poll).
    """

    def __init__(self, sensor_type, client_name, coordinates):
        """Initialize the sensor."""
        from buienradar.buienradar import (PRECIPITATION_FORECAST, CONDITION)

        self.client_name = client_name
        self._name = SENSOR_TYPES[sensor_type][0]
        self.type = sensor_type
        self._state = None
        self._unit_of_measurement = SENSOR_TYPES[self.type][1]
        self._entity_picture = None
        self._attribution = None
        self._measured = None
        self._stationname = None
        self._unique_id = self.uid(coordinates)
        # All continuous sensors should be forced to be updated
        self._force_update = self.type != SYMBOL and \
            not self.type.startswith(CONDITION)
        # Precipitation-forecast sensors additionally expose the timeframe
        # they were computed over (filled in by load_data()).
        if self.type.startswith(PRECIPITATION_FORECAST):
            self._timeframe = None

    def uid(self, coordinates):
        """Generate a unique id using coordinates and sensor type."""
        # The combination of the location, name and sensor type is unique
        return "%2.6f%2.6f%s" % (coordinates[CONF_LATITUDE],
                                 coordinates[CONF_LONGITUDE],
                                 self.type)

    def load_data(self, data):
        """Load the sensor with relevant data.

        Returns True when the sensor state changed (caller then refreshes
        the HA state), False otherwise.
        """
        # Find sensor
        from buienradar.buienradar import (ATTRIBUTION, CONDITION, CONDCODE,
                                           DETAILED, EXACT, EXACTNL, FORECAST,
                                           IMAGE, MEASURED,
                                           PRECIPITATION_FORECAST, STATIONNAME,
                                           TIMEFRAME)
        # Check if we have a new measurement,
        # otherwise we do not have to update the sensor
        if self._measured == data.get(MEASURED):
            return False
        self._attribution = data.get(ATTRIBUTION)
        self._stationname = data.get(STATIONNAME)
        self._measured = data.get(MEASURED)
        # Forecast sensors carry a day suffix (_1d .. _5d); map the suffix
        # to an index into the FORECAST list.
        if self.type.endswith('_1d') or \
                self.type.endswith('_2d') or \
                self.type.endswith('_3d') or \
                self.type.endswith('_4d') or \
                self.type.endswith('_5d'):
            fcday = 0
            if self.type.endswith('_2d'):
                fcday = 1
            if self.type.endswith('_3d'):
                fcday = 2
            if self.type.endswith('_4d'):
                fcday = 3
            if self.type.endswith('_5d'):
                fcday = 4
            # update all other sensors
            if self.type.startswith(SYMBOL) or self.type.startswith(CONDITION):
                try:
                    condition = data.get(FORECAST)[fcday].get(CONDITION)
                except IndexError:
                    _LOGGER.warning("No forecast for fcday=%s...", fcday)
                    return False
                if condition:
                    # Pick the representation matching this sensor variant;
                    # the plain CONDITION value is the default.
                    new_state = condition.get(CONDITION, None)
                    if self.type.startswith(SYMBOL):
                        new_state = condition.get(EXACTNL, None)
                    if self.type.startswith('conditioncode'):
                        new_state = condition.get(CONDCODE, None)
                    if self.type.startswith('conditiondetailed'):
                        new_state = condition.get(DETAILED, None)
                    if self.type.startswith('conditionexact'):
                        new_state = condition.get(EXACT, None)
                    img = condition.get(IMAGE, None)
                    # Only report a change when state or picture differ.
                    if new_state != self._state or img != self._entity_picture:
                        self._state = new_state
                        self._entity_picture = img
                        return True
                return False
            try:
                # Non-condition forecast sensors: strip the '_Nd' suffix to
                # get the key inside the forecast entry.
                self._state = data.get(FORECAST)[fcday].get(self.type[:-3])
                return True
            except IndexError:
                _LOGGER.warning("No forecast for fcday=%s...", fcday)
                return False
        if self.type == SYMBOL or self.type.startswith(CONDITION):
            # update weather symbol & status text
            condition = data.get(CONDITION, None)
            if condition:
                if self.type == SYMBOL:
                    new_state = condition.get(EXACTNL, None)
                if self.type == CONDITION:
                    new_state = condition.get(CONDITION, None)
                if self.type == 'conditioncode':
                    new_state = condition.get(CONDCODE, None)
                if self.type == 'conditiondetailed':
                    new_state = condition.get(DETAILED, None)
                if self.type == 'conditionexact':
                    new_state = condition.get(EXACT, None)
                img = condition.get(IMAGE, None)
                if new_state != self._state or img != self._entity_picture:
                    self._state = new_state
                    self._entity_picture = img
                    return True
            return False
        if self.type.startswith(PRECIPITATION_FORECAST):
            # update nested precipitation forecast sensors
            nested = data.get(PRECIPITATION_FORECAST)
            self._timeframe = nested.get(TIMEFRAME)
            # Key inside the nested dict is the sensor type without the
            # 'precipitation_forecast_' prefix.
            self._state = nested.get(self.type[len(PRECIPITATION_FORECAST)+1:])
            return True
        # update all other sensors
        self._state = data.get(self.type)
        return True

    @property
    def attribution(self):
        """Return the attribution."""
        return self._attribution

    @property
    def unique_id(self):
        """Return the unique id."""
        return self._unique_id

    @property
    def name(self):
        """Return the name of the sensor."""
        return '{} {}'.format(self.client_name, self._name)

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def entity_picture(self):
        """Weather symbol if type is symbol."""
        return self._entity_picture

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        from buienradar.buienradar import (PRECIPITATION_FORECAST)
        if self.type.startswith(PRECIPITATION_FORECAST):
            result = {ATTR_ATTRIBUTION: self._attribution}
            if self._timeframe is not None:
                result[TIMEFRAME_LABEL] = "%d min" % (self._timeframe)
            return result
        result = {
            ATTR_ATTRIBUTION: self._attribution,
            SENSOR_TYPES['stationname'][0]: self._stationname,
        }
        if self._measured is not None:
            # convert datetime (Europe/Amsterdam) into local datetime
            local_dt = dt_util.as_local(self._measured)
            result[MEASURED_LABEL] = local_dt.strftime("%c")
        return result

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def icon(self):
        """Return possible sensor specific icon."""
        return SENSOR_TYPES[self.type][2]

    @property
    def force_update(self):
        """Return true for continuous sensors, false for discrete sensors."""
        return self._force_update
class BrData:
    """Get the latest data and updates the states.

    Periodically fetches XML weather data plus the rain forecast from
    buienradar, parses it and pushes the result into the registered
    sensor entities. Scheduling is self-perpetuating: each async_update
    call schedules the next one.
    """

    def __init__(self, hass, coordinates, timeframe, devices):
        """Initialize the data object."""
        self.devices = devices
        self.data = {}
        self.hass = hass
        self.coordinates = coordinates
        self.timeframe = timeframe

    @asyncio.coroutine
    def update_devices(self):
        """Update all devices/sensors."""
        if self.devices:
            tasks = []
            # Update all devices
            for dev in self.devices:
                # load_data() returns True only when the state changed.
                if dev.load_data(self.data):
                    tasks.append(dev.async_update_ha_state())
            if tasks:
                yield from asyncio.wait(tasks, loop=self.hass.loop)

    @asyncio.coroutine
    def schedule_update(self, minute=1):
        """Schedule an update after minute minutes."""
        _LOGGER.debug("Scheduling next update in %s minutes.", minute)
        nxt = dt_util.utcnow() + timedelta(minutes=minute)
        # async_update is invoked by HA at the scheduled point in time.
        async_track_point_in_utc_time(self.hass, self.async_update,
                                      nxt)

    @asyncio.coroutine
    def get_data(self, url):
        """Load data from specified url.

        Returns a dict with SUCCESS, MESSAGE, STATUS_CODE and CONTENT keys.
        """
        from buienradar.buienradar import (CONTENT,
                                           MESSAGE, STATUS_CODE, SUCCESS)
        _LOGGER.debug("Calling url: %s...", url)
        result = {SUCCESS: False, MESSAGE: None}
        resp = None
        try:
            websession = async_get_clientsession(self.hass)
            with async_timeout.timeout(10, loop=self.hass.loop):
                resp = yield from websession.get(url)
                result[STATUS_CODE] = resp.status
                result[CONTENT] = yield from resp.text()
                if resp.status == 200:
                    result[SUCCESS] = True
                else:
                    result[MESSAGE] = "Got http statuscode: %d" % (resp.status)
            return result
        except (asyncio.TimeoutError, aiohttp.ClientError) as err:
            result[MESSAGE] = "%s" % err
            return result
        finally:
            # Always release the connection back to the pool.
            if resp is not None:
                yield from resp.release()

    @asyncio.coroutine
    def async_update(self, *_):
        """Update the data from buienradar."""
        from buienradar.buienradar import (parse_data, CONTENT,
                                           DATA, MESSAGE, STATUS_CODE, SUCCESS)
        # Primary endpoint first, fall back to the api host on failure.
        content = yield from self.get_data('http://xml.buienradar.nl')
        if not content.get(SUCCESS, False):
            content = yield from self.get_data('http://api.buienradar.nl')
        if content.get(SUCCESS) is not True:
            # unable to get the data
            _LOGGER.warning("Unable to retrieve xml data from Buienradar."
                            "(Msg: %s, status: %s,)",
                            content.get(MESSAGE),
                            content.get(STATUS_CODE),)
            # schedule new call
            yield from self.schedule_update(SCHEDULE_NOK)
            return
        # rounding coordinates prevents unnecessary redirects/calls
        rainurl = 'http://gadgets.buienradar.nl/data/raintext/?lat={}&lon={}'
        rainurl = rainurl.format(
            round(self.coordinates[CONF_LATITUDE], 2),
            round(self.coordinates[CONF_LONGITUDE], 2)
            )
        raincontent = yield from self.get_data(rainurl)
        if raincontent.get(SUCCESS) is not True:
            # unable to get the data
            _LOGGER.warning("Unable to retrieve raindata from Buienradar."
                            "(Msg: %s, status: %s,)",
                            raincontent.get(MESSAGE),
                            raincontent.get(STATUS_CODE),)
            # schedule new call
            yield from self.schedule_update(SCHEDULE_NOK)
            return
        result = parse_data(content.get(CONTENT),
                            raincontent.get(CONTENT),
                            self.coordinates[CONF_LATITUDE],
                            self.coordinates[CONF_LONGITUDE],
                            self.timeframe)
        _LOGGER.debug("Buienradar parsed data: %s", result)
        if result.get(SUCCESS) is not True:
            # Only warn outside the 00:xx hour window; presumably the feed
            # is routinely incomplete around midnight — TODO confirm intent.
            if int(datetime.now().strftime('%H')) > 0:
                _LOGGER.warning("Unable to parse data from Buienradar."
                                "(Msg: %s)",
                                result.get(MESSAGE),)
            yield from self.schedule_update(SCHEDULE_NOK)
            return
        self.data = result.get(DATA)
        yield from self.update_devices()
        yield from self.schedule_update(SCHEDULE_OK)

    @property
    def attribution(self):
        """Return the attribution."""
        from buienradar.buienradar import ATTRIBUTION
        return self.data.get(ATTRIBUTION)

    @property
    def stationname(self):
        """Return the name of the selected weatherstation."""
        from buienradar.buienradar import STATIONNAME
        return self.data.get(STATIONNAME)

    @property
    def condition(self):
        """Return the condition."""
        from buienradar.buienradar import CONDITION
        return self.data.get(CONDITION)

    @property
    def temperature(self):
        """Return the temperature, or None."""
        from buienradar.buienradar import TEMPERATURE
        try:
            return float(self.data.get(TEMPERATURE))
        except (ValueError, TypeError):
            return None

    @property
    def pressure(self):
        """Return the pressure, or None."""
        from buienradar.buienradar import PRESSURE
        try:
            return float(self.data.get(PRESSURE))
        except (ValueError, TypeError):
            return None

    @property
    def humidity(self):
        """Return the humidity, or None."""
        from buienradar.buienradar import HUMIDITY
        try:
            return int(self.data.get(HUMIDITY))
        except (ValueError, TypeError):
            return None

    @property
    def visibility(self):
        """Return the visibility, or None."""
        from buienradar.buienradar import VISIBILITY
        try:
            return int(self.data.get(VISIBILITY))
        except (ValueError, TypeError):
            return None

    @property
    def wind_speed(self):
        """Return the windspeed, or None."""
        from buienradar.buienradar import WINDSPEED
        try:
            return float(self.data.get(WINDSPEED))
        except (ValueError, TypeError):
            return None

    @property
    def wind_bearing(self):
        """Return the wind bearing, or None."""
        from buienradar.buienradar import WINDAZIMUTH
        try:
            return int(self.data.get(WINDAZIMUTH))
        except (ValueError, TypeError):
            return None

    @property
    def forecast(self):
        """Return the forecast data."""
        from buienradar.buienradar import FORECAST
        return self.data.get(FORECAST)
| {
"content_hash": "9fe5d2db563825a8ca8d8f4aca52a1ae",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 79,
"avg_line_length": 39.30769230769231,
"alnum_prop": 0.5828144458281445,
"repo_name": "persandstrom/home-assistant",
"id": "c7ca0c097ffd7c299777fd453cdc932e588dadbd",
"size": "22485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/buienradar.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
"""
Exception definitions.
"""
from manilaclient.openstack.common.apiclient.exceptions import * # noqa
class NoTokenLookupException(ClientException):
    """No support for looking up endpoints.

    This form of authentication does not support looking up
    endpoints from an existing token.
    """
    pass
class VersionNotFoundForAPIMethod(Exception):
    """Raised when a method is not available for the requested API version."""

    # Kept as a class attribute so subclasses may override the template.
    msg_fmt = "API version '%(vers)s' is not supported on '%(method)s' method."

    def __init__(self, version, method):
        self.version = version
        self.method = method

    def __str__(self):
        details = {"vers": self.version, "method": self.method}
        return self.msg_fmt % details
| {
"content_hash": "6d8cc30a7da7941227d08f613d3eb716",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 25.76,
"alnum_prop": 0.6785714285714286,
"repo_name": "sniperganso/python-manilaclient",
"id": "4b405821849bdf3f199c6fa78a99500d313d16fa",
"size": "1245",
"binary": false,
"copies": "1",
"ref": "refs/heads/bp/data-service-migration-api",
"path": "manilaclient/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "755723"
},
{
"name": "Shell",
"bytes": "11199"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import linecache
import tokenize
from functools import partial
from os.path import basename
from os.path import exists
from os.path import splitext
from threading import current_thread
from .const import SITE_PACKAGES_PATHS
from .const import SYS_PREFIX_PATHS
from .util import CYTHON_SUFFIX_RE
from .util import LEADING_WHITESPACE_RE
from .util import MISSING
from .util import cached_property
from .util import get_func_in_mro
from .util import get_main_thread
from .util import if_same_code
__all__ = ('Event',)
class Event:
    """
    A wrapper object for Frame objects. Instances of this are passed to your custom functions or predicates.
    Provides few convenience properties.
    Args:
        frame (Frame): A python `Frame <https://docs.python.org/3/reference/datamodel.html#frame-objects>`_ object.
        kind (str): A string like ``'call'``, ``'line'``, ``'return'`` or ``'exception'``.
        arg: A value that depends on ``kind``. Usually is ``None`` but for ``'return'`` or ``'exception'`` other values
            may be expected.
        tracer (:class:`hunter.tracer.Tracer`): The :class:`~hunter.tracer.Tracer` instance that created the event.
            Needed for the ``calls`` and ``depth`` fields.
    """

    # Class-level defaults; the real values are filled in by __init__
    # (or copied directly into __dict__ by detach()/clone()).
    frame = None
    kind = None
    arg = None
    depth = None
    calls = None
    builtin = None

    def __init__(
        self,
        frame,
        kind,
        arg,
        tracer=None,
        depth=None,
        calls=None,
        threading_support=MISSING,
    ):
        # Either a tracer is given (and supplies depth/calls/threading
        # support), or all three must be passed explicitly.
        if tracer is None:
            if depth is None:
                raise TypeError('Missing argument: depth (required because tracer was not given).')
            if calls is None:
                raise TypeError('Missing argument: calls (required because tracer was not given).')
            if threading_support is MISSING:
                raise TypeError('Missing argument: threading_support (required because tracer was not given).')
        else:
            depth = tracer.depth
            calls = tracer.calls
            threading_support = tracer.threading_support
        #: The original Frame object.
        #:
        #: .. note::
        #:
        #:    Not allowed in the builtin predicates (it's the actual Frame object).
        #:    You may access it from your custom predicate though.
        self.frame = frame
        # 'c_call'/'c_return'/'c_exception' are normalized to the plain
        # kind with the builtin flag set.
        if kind.startswith('c_'):
            kind = kind[2:]
            builtin = True
        else:
            builtin = False
        #: If kind of the event is one of ``'c_call'``, ``'c_return'``, or ``'c_exception'`` then this will be True.
        #:
        #: :type: bool
        self.builtin = builtin
        #: The kind of the event, could be one of ``'call'``, ``'line'``, ``'return'``, ``'exception'``.
        #:
        #: :type: str
        self.kind = kind
        #: A value that depends on ``kind``
        self.arg = arg
        #: Tracing depth (increases on calls, decreases on returns).
        #:
        #: :type: int
        self.depth = depth
        #: A counter for total number of calls up to this Event.
        #:
        #: :type: int
        self.calls = calls
        #: A copy of the :attr:`hunter.tracer.Tracer.threading_support` flag.
        #:
        #: .. note::
        #:
        #:    Not allowed in the builtin predicates. You may access it from your custom predicate though.
        #:
        #: :type: bool or None
        self.threading_support = threading_support
        #: Flag that is ``True`` if the event was created with :meth:`~hunter.event.Event.detach`.
        #:
        #: :type: bool
        self.detached = False

    def __repr__(self):
        return '<Event kind=%r function=%r module=%r filename=%r lineno=%s>' % (
            self.kind,
            self.function,
            self.module,
            self.filename,
            self.lineno,
        )

    def __eq__(self, other):
        # Note: lineno and arg are deliberately not part of equality here.
        return (
            type(self) == type(other)
            and self.kind == other.kind
            and self.depth == other.depth
            and self.function == other.function
            and self.module == other.module
            and self.filename == other.filename
        )

    def __reduce__(self):
        # Events are explicitly not picklable (they wrap live frames).
        raise TypeError("cannot pickle 'hunter.event.Event' object")

    def detach(self, value_filter=None):
        """
        Return a copy of the event with references to live objects (like the frame) removed. You should use this if you
        want to store or use the event outside the handler.
        You should use this if you want to avoid memory leaks or side-effects when storing the events.
        Args:
            value_filter:
                Optional callable that takes one argument: ``value``.
                If not specified then the ``arg``, ``globals`` and ``locals`` fields will be ``None``.
        Example usage in a :class:`~hunter.actions.ColorStreamAction` subclass:
        .. sourcecode:: python
            def __call__(self, event):
                self.events = [event.detach(lambda field, value: self.try_repr(value))]
        """
        event = Event.__new__(Event)
        # Force evaluation of the lazy (cached_property) fields and copy
        # the concrete values so the frame is no longer needed.
        event.__dict__['code'] = self.code
        event.__dict__['filename'] = self.filename
        event.__dict__['fullsource'] = self.fullsource
        event.__dict__['function'] = self.function
        event.__dict__['lineno'] = self.lineno
        event.__dict__['module'] = self.module
        event.__dict__['source'] = self.source
        event.__dict__['stdlib'] = self.stdlib
        event.__dict__['threadid'] = self.threadid
        event.__dict__['threadname'] = self.threadname
        event.__dict__['instruction'] = self.instruction
        if value_filter:
            event.__dict__['arg'] = value_filter(self.arg)
            event.__dict__['globals'] = {key: value_filter(value) for key, value in self.globals.items()}
            event.__dict__['locals'] = {key: value_filter(value) for key, value in self.locals.items()}
        else:
            event.__dict__['globals'] = {}
            event.__dict__['locals'] = {}
            event.__dict__['arg'] = None
        event.threading_support = self.threading_support
        event.calls = self.calls
        event.depth = self.depth
        event.kind = self.kind
        event.builtin = self.builtin
        event.detached = True
        return event

    def clone(self):
        # Shallow copy: shares the frame and all cached values.
        event = Event.__new__(Event)
        event.__dict__ = dict(self.__dict__)
        return event

    @cached_property
    def instruction(self):
        """
        Last byte instruction. If no bytecode was used (Cython code) then it returns ``None``.
        Depending on Python version it might be an int or a single char string.
        :type: int or single char string or None
        """
        if self.frame.f_lasti >= 0 and self.frame.f_code.co_code:
            return self.frame.f_code.co_code[self.frame.f_lasti]

    @cached_property
    def threadid(self):
        """
        Current thread ident. If current thread is main thread then it returns ``None``.
        :type: int or None
        """
        current = self._thread.ident
        main = get_main_thread()
        if main is None:
            return current
        else:
            return current if current != main.ident else None

    @cached_property
    def threadname(self):
        """
        Current thread name.
        :type: str
        """
        return self._thread.name

    @cached_property
    def _thread(self):
        # Cached so threadid/threadname agree on the same thread object.
        return current_thread()

    @cached_property
    def locals(self):
        """
        A dict with local variables.
        :type: dict
        """
        if self.builtin:
            return {}
        return self.frame.f_locals

    @cached_property
    def globals(self):
        """
        A dict with global variables.
        :type: dict
        """
        if self.builtin:
            return {}
        return self.frame.f_globals

    @cached_property
    def function(self):
        """
        A string with function name.
        :type: str
        """
        if self.builtin:
            # For C-level events, arg is the builtin function itself.
            return self.arg.__name__
        else:
            return self.code.co_name

    @cached_property
    def function_object(self):
        """
        The function instance.
        .. warning:: Use with prudence.
            * Will be ``None`` for decorated functions on Python 2 (methods may still work tho).
            * May be ``None`` if tracing functions or classes not defined at module level.
            * May be very slow if tracing modules with lots of variables.
        :type: function or None
        """
        # Based on MonkeyType's get_func
        if self.builtin:
            return self.builtin
        code = self.code
        if code.co_name is None:
            return None
        # First, try to find the function in globals
        candidate = self.globals.get(code.co_name, None)
        func = if_same_code(candidate, code)
        # If that failed, as will be the case with class and instance methods, try
        # to look up the function from the first argument. In the case of class/instance
        # methods, this should be the class (or an instance of the class) on which our
        # method is defined.
        if func is None and code.co_argcount >= 1:
            first_arg = self.locals.get(code.co_varnames[0])
            func = get_func_in_mro(first_arg, code)
        # If we still can't find the function, as will be the case with static methods,
        # try looking at classes in global scope.
        if func is None:
            for v in self.globals.values():
                if not isinstance(v, type):
                    continue
                func = get_func_in_mro(v, code)
                if func is not None:
                    break
        return func

    @cached_property
    def module(self):
        """
        A string with module name (like ``'foo.bar'``).
        :type: str
        """
        if self.builtin:
            module = self.arg.__module__
        else:
            module = self.frame.f_globals.get('__name__', '')
        if module is None:
            module = '?'
        return module

    @cached_property
    def filename(self):
        """
        A string with the path to the module's file. May be empty if ``__file__`` attribute is missing.
        May be relative if running scripts.
        :type: str
        """
        # if self.builtin:
        #     return '<builtin>'
        filename = self.frame.f_code.co_filename
        if not filename:
            filename = self.frame.f_globals.get('__file__')
        if not filename:
            filename = '?'
        if filename.endswith(('.pyc', '.pyo')):
            # Map compiled file back to its source file.
            filename = filename[:-1]
        elif filename.endswith('$py.class'):  # Jython
            filename = filename[:-9] + '.py'
        elif filename.endswith(('.so', '.pyd')):
            # Compiled extension: try to locate the matching Cython/Python
            # source next to the binary.
            basename = CYTHON_SUFFIX_RE.sub('', filename)
            for ext in ('.pyx', '.py'):
                cyfilename = basename + ext
                if exists(cyfilename):
                    filename = cyfilename
                    break
        return filename

    @cached_property
    def lineno(self):
        """
        An integer with line number in file.
        :type: int
        """
        return self.frame.f_lineno

    @cached_property
    def code(self):
        """
        A code object (not a string).
        """
        return self.frame.f_code

    @cached_property
    def stdlib(self):
        """
        A boolean flag. ``True`` if frame is in stdlib.
        :type: bool
        """
        module_parts = self.module.split('.')
        if 'pkg_resources' in module_parts:
            # skip this over-vendored module
            return True
        elif self.filename == '<string>' and (self.module.startswith('namedtuple_') or self.module == 'site'):
            # skip namedtuple exec garbage
            return True
        elif self.filename.startswith(SITE_PACKAGES_PATHS):
            # if it's in site-packages then its definitely not stdlib
            return False
        elif self.filename.startswith(SYS_PREFIX_PATHS):
            return True
        else:
            return False

    @cached_property
    def fullsource(self):
        """
        A string with the sourcecode for the current statement (from ``linecache`` - failures are ignored).
        May include multiple lines if it's a class/function definition (will include decorators).
        :type: str
        """
        try:
            if self.kind == 'call' and self.code.co_name != '<module>':
                lines = []
                try:
                    # Tokenize forward from the current line; when a def/class/
                    # lambda token shows up, the collected lines (including any
                    # decorators) form the full statement.
                    for _, token, _, _, line in tokenize.generate_tokens(
                        partial(
                            next,
                            yield_lines(
                                self.filename,
                                self.frame.f_globals,
                                self.lineno - 1,
                                lines.append,
                            ),
                        )
                    ):
                        if token in ('def', 'class', 'lambda'):
                            return ''.join(lines)
                except tokenize.TokenError:
                    pass
            return linecache.getline(self.filename, self.lineno, self.frame.f_globals)
        except Exception as exc:
            return '??? NO SOURCE: {!r}'.format(exc)

    @cached_property
    def source(self):
        """
        A string with the sourcecode for the current line (from ``linecache`` - failures are ignored).
        Fast but sometimes incomplete.
        :type: str
        """
        if self.filename.endswith(('.so', '.pyd')):
            return '??? NO SOURCE: not reading binary {} file'.format(splitext(basename(self.filename))[1])
        try:
            return linecache.getline(self.filename, self.lineno, self.frame.f_globals)
        except Exception as exc:
            return '??? NO SOURCE: {!r}'.format(exc)

    # Allow event['field'] access as an alias for attribute access.
    __getitem__ = object.__getattribute__
def yield_lines(
    filename,
    module_globals,
    start,
    collector,
    limit=10,
    leading_whitespace_re=LEADING_WHITESPACE_RE,
):
    """Yield up to *limit* lines from *filename* starting at *start*, dedented.

    The indentation of the first line sets the dedent prefix; iteration stops
    at the first subsequent line that does not share that prefix. Every line
    that is yielded is also passed (un-dedented) to *collector*.
    """
    indent = None
    width = 0
    all_lines = linecache.getlines(filename, module_globals)
    for line in all_lines[start : start + limit]:
        if indent is None:
            # First line: capture its leading whitespace as the dedent prefix.
            found = leading_whitespace_re.findall(line)
            indent = found[0] if found else ''
            width = len(indent)
        elif not line.startswith(indent):
            # A less-indented line ends the block.
            break
        collector(line)
        yield line[width:]
| {
"content_hash": "31887d007a5f0acfbbfd83b0340b19a8",
"timestamp": "",
"source": "github",
"line_count": 468,
"max_line_length": 119,
"avg_line_length": 31.564102564102566,
"alnum_prop": 0.5456945572705117,
"repo_name": "ionelmc/python-hunter",
"id": "939d8c7bacd712a5c508812adef400517cecdb0d",
"size": "14772",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hunter/event.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Cython",
"bytes": "50327"
},
{
"name": "Python",
"bytes": "265849"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter MineralType.other: widen to a TextField labelled 'comment'."""

    dependencies = [
        ('stein', '0018_remove_mineraltype_classification1'),
    ]

    operations = [
        migrations.AlterField(
            model_name='mineraltype',
            name='other',
            # blank=True keeps the field optional in forms/admin.
            field=models.TextField(blank=True, max_length=100, verbose_name='comment'),
        ),
    ]
| {
"content_hash": "95cfad82564de065230e55120c066267",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 87,
"avg_line_length": 24.5,
"alnum_prop": 0.6071428571428571,
"repo_name": "GeoMatDigital/django-geomat",
"id": "33465adafecf79e3a8c6391bc0249a5d337cb5e6",
"size": "467",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geomat/stein/migrations/0019_auto_20170115_1941.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16597"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "14474"
},
{
"name": "JavaScript",
"bytes": "31354"
},
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "197468"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-pipeline'
copyright = u'2011-2012, Timothée Peignier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3'
# The full version, including alpha/beta/rc tags.
release = '1.3.19'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-pipelinedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-pipeline.tex', u'Pipeline Documentation',
u'Timothée Peignier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# Single man page, section 1 (user commands), generated from index.rst.
man_pages = [
    ('index', 'django-pipeline', u'Pipeline Documentation',
     [u'Timothée Peignier'], 1)
]
| {
"content_hash": "ceef82a8e7b187ca4308a9f36364215c",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 32.44607843137255,
"alnum_prop": 0.706904366218462,
"repo_name": "almost/django-pipeline",
"id": "1270f02a2ab1b689e12473c029a9d05caa2777fa",
"size": "7041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "925"
},
{
"name": "CoffeeScript",
"bytes": "52"
},
{
"name": "JavaScript",
"bytes": "144"
},
{
"name": "Python",
"bytes": "78458"
},
{
"name": "Shell",
"bytes": "4529"
}
],
"symlink_target": ""
} |
import sys
import os
import codecs
import pygettext
MESSAGES = []
def detect_unicode_encoding(bytes):
    """Identify the Unicode encoding of *bytes* from a leading BOM.

    Returns a ``(encoding_name, bom_length)`` tuple.  UTF-32 marks are
    tested before UTF-16 because the UTF-32LE BOM begins with the
    UTF-16LE BOM.  Defaults to ``('UTF-8', 0)`` when no BOM is present.
    """
    candidates = (
        ('UTF-8', codecs.BOM_UTF8),
        ('UTF-32LE', codecs.BOM_UTF32_LE),
        ('UTF-32BE', codecs.BOM_UTF32_BE),
        ('UTF-16LE', codecs.BOM_UTF16_LE),
        ('UTF-16BE', codecs.BOM_UTF16_BE),
    )
    for name, bom in candidates:
        if bytes.startswith(bom):
            return name, len(bom)
    return 'UTF-8', 0
class ParseError(ValueError):
    """Raised when a .po catalog file cannot be parsed (see parse())."""
def merge(master_file, language_files):
    """Merge each language catalog in *language_files* with the master.

    The master file is parsed once and reused for every language file.
    """
    master = parse(master_file)
    for lang_path in language_files:
        merging(master, lang_path)
def merging(parsed_master_file, path):
    """Rewrite the catalog at *path* from the master's entries.

    Translations already present in *path* are kept (matched on msgid);
    entries missing from *path* get a ``None`` message.  NOTE: mutates
    the records of *parsed_master_file* in place before saving.
    """
    translations = {entry['id']: entry['message'] for entry in parse(path)}
    merged = []
    for entry in parsed_master_file:
        entry['message'] = translations.get(entry['id'])
        merged.append(entry)
    save(path, merged)
def items(path, sort_by, dir):
    """Return catalog entries from *path*, excluding the header record.

    When *sort_by* is truthy the entries are ordered via sort().
    """
    entries = parse(path)[1:]
    if not sort_by:
        return entries
    return sort(entries, sort_by, dir)
def sort(po, sort_by, dir):
    """Return the records of *po* grouped and ordered by a column.

    sort_by -- 'id', 'string' or 'context', mapped to the record keys
               'id', 'message' and 'path' respectively.
    dir     -- 'up' reverses the key ordering.
    """
    col_map = dict(id='id', string='message', context='path')
    key_name = col_map[sort_by]
    group = dict()
    for message in po:
        group.setdefault(message[key_name], []).append(message)
    # sorted() over the dict works on both Python 2 and 3;
    # `group.keys().sort()` relied on keys() returning a list (Python 2 only).
    ordered_keys = sorted(group)
    if dir == 'up':
        ordered_keys.reverse()
    result = list()
    for k in ordered_keys:
        result.extend(group[k])
    return result
def save(path, message_list):
    """Serialize *message_list* to *path*, keeping the old file as a backup.

    message_list[0]['message'] holds the raw header block; the remaining
    records are written as ``#:`` location / msgid / msgstr groups.  The
    previous file is renamed to ``*.back`` before writing.
    """
    txt = []
    m = message_list[0]['message']
    txt.append(m)
    txt.append(u'\n\n')
    for p in message_list[1:]:
        message = p['message'] or ''
        context = p['context']
        id = p['id']
        txt.append(u'#: %s' % context)
        txt.append(u'msgid %s\n' % normalize(id))
        txt.append(u'msgstr %s\n\n' % normalize(message))
    txt = u''.join(txt)
    backup_name = path.replace('.po', '.back')
    try:
        os.remove(backup_name)
    except os.error:
        pass
    os.rename(path, backup_name)
    # Close the handle deterministically: the original did
    # codecs.open(...).write(txt) and relied on garbage collection to
    # flush and close the file, which is not guaranteed.
    outfile = codecs.open(path, 'wb', 'utf-8')
    try:
        outfile.write(txt)
    finally:
        outfile.close()
def update(path, msg_id, msg_text):
    """Set the translation of *msg_id* in the catalog at *path*, then save.

    Every entry whose stripped msgid matches is updated.
    """
    catalog = parse(path)
    target = msg_id.strip()
    for entry in catalog[1:]:
        if entry['id'].strip() == target:
            entry['message'] = msg_text
    save(path, catalog)
def quote(msg):
    """Escape *msg* for a quoted .po string (delegates to pygettext)."""
    return pygettext.escape_unicode(msg)
def normalize(s):
    """Render *s* in .po quoted form (adapted from pygettext).

    Single-line strings become one quoted string; multi-line strings
    become an empty first line followed by one quoted string per line.
    """
    lines = s.split('\n')
    if len(lines) == 1:
        return '"' + quote(s) + '"'
    # A trailing empty element means *s* ended with '\n'; fold that
    # newline back onto the last real line.
    if not lines[-1]:
        del lines[-1]
        lines[-1] += '\n'
    escaped = [quote(line) for line in lines]
    return '""\n"' + '\\n"\n"'.join(escaped) + '"'
def add(id, str, context, fuzzy, MESSAGES):
    """Append a non-fuzzy translation record to MESSAGES.

    context is a "path:line" location comment; records marked fuzzy are
    dropped.  (Parameter names kept for caller compatibility.)
    """
    if fuzzy:
        return
    parts = context.split(':')
    source_path = parts[0]
    MESSAGES.append(dict(
        id=id,
        message=str,
        path=source_path,
        context=context,
        file=os.path.basename(source_path),
        line=parts[-1].replace('\n', ''),  # strip the trailing newline
    ))
def parse(infile):
    """Parse the .po file *infile* into a list of message records.

    The returned list's first record has its 'message' overwritten with
    the raw header block; subsequent records are dicts produced by add()
    (keys: id/message/path/context/file/line).  Raises ParseError on
    malformed escapes, bad encoding, or stray content.
    """
    MESSAGES = list()
    ID = 1
    STR = 2
    header = list()
    fd = open(infile, 'rt')
    encoding, offset = detect_unicode_encoding(fd.read(4))
    fd.seek(offset)
    lines = [line.decode(encoding) for line in fd.readlines()]
    section = None
    fuzzy = 0
    # Parse the catalog
    lno = 0
    context = ''
    prev_context = ''
    heading = True
    for l in lines:
        if not l:
            continue
        lno += 1
        # Collect header lines until the first location comment, or until
        # the msgid following a "Generated-By:" line.
        if heading:
            if l.startswith('#: '):
                heading = False
            if l.startswith('msgid "') and header and \
               'Generated-By:' in header[-1]:
                heading = False
            if l.strip() and heading:
                header.append(l)
        # If we get a comment line after a msgstr, this is a new entry
        if l[0] == '#' and section == STR:
            add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
            section = None
            fuzzy = 0
        # Record a fuzzy mark.  FIX: the original tested
        # `l.find('fuzzy')`, which is truthy for -1 (not found), so every
        # "#," flag line was treated as fuzzy.
        if l[:2] == '#,' and 'fuzzy' in l:
            fuzzy = 1
        if l.startswith('#: '):
            context = l[len('#: '):]
        # Skip comments
        if l[0] == '#':
            continue
        # Now we are in a msgid section, output previous section
        if l.startswith('msgid'):
            if section == STR:
                add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
            section = ID
            prev_context = context
            l = l[5:]
            msgid = msgstr = ''
        # Now we are in a msgstr section
        elif l.startswith('msgstr'):
            section = STR
            l = l[6:]
        # Skip empty lines
        l = l.strip()
        if not l:
            continue
        # XXX: Does this always follow Python escape semantics?
        try:
            l = eval(l)
        except Exception as e:
            # Same text the old `print >> sys.stderr` emitted.
            sys.stderr.write('Escape error on %s: %d before: %r\n'
                             % (infile, lno, l))
            raise ParseError(e)
        try:
            l = l.decode('utf8')
        except UnicodeDecodeError as e:
            # FIX: the original's bare `except UnicodeDecodeError:` left
            # `e` unbound, so `raise ParseError(e)` crashed with NameError.
            sys.stderr.write('Encoding error on %s: %d before: %r\n'
                             % (infile, lno, l))
            raise ParseError(e)
        if section == ID:
            msgid += l
        elif section == STR:
            msgstr += l
        else:
            # FIX: no exception is in flight here, so the original's
            # `raise ParseError(e)` referenced an unbound/stale name.
            sys.stderr.write('Syntax error on %s:%d before:\n' % (infile, lno))
            sys.stderr.write(l + '\n')
            raise ParseError('syntax error')
    # Add last entry
    if section == STR:
        add(msgid, msgstr, prev_context, fuzzy, MESSAGES)
    # The first parsed entry's message slot is reused for the raw header
    # (save() and items() treat record 0 as the header).
    MESSAGES[0]['message'] = u''.join(header)
    return MESSAGES
| {
"content_hash": "27e20e997e35fcbb2e2b24c72b0dc8f1",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 78,
"avg_line_length": 24.68979591836735,
"alnum_prop": 0.5088444370970409,
"repo_name": "dbrattli/python-gearshift",
"id": "c6c8f3813d522bbc2a896ca2f663b84baea868e9",
"size": "6049",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gearshift/i18n/pygettext/catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7127"
},
{
"name": "Python",
"bytes": "606764"
}
],
"symlink_target": ""
} |
import copy
import feedparser
import pendulum
import testfixtures
import unittest
from box import Box
from hashlib import md5
from unittest.mock import AsyncMock, MagicMock, patch
from rssalertbot.config import Config
from rssalertbot.feed import Feed
from rssalertbot.storage import BaseStorage
# Feed group configuration shared by all tests; only the "log" output is on.
group = Box({
    "name": "Test Group",
    "outputs": {
        "log": {
            "enabled": True,
        }
    }
})
# Name/URL used to construct Feed instances throughout the tests.
testdata = {
    "name": "test feed",
    "url": "http://localhost:8930",
}
class MockStorage(BaseStorage):
    # In-memory storage backend: implements the BaseStorage read/write/delete
    # hooks on a plain dict so tests never touch real storage.  The higher-level
    # save_date/save_event/delete_event helpers used by the tests are
    # presumably provided by BaseStorage on top of these hooks — see
    # rssalertbot.storage.
    def __init__(self, *args, **kwargs):
        # Accept and ignore any backend-specific constructor arguments.
        self.data = {}
    def _read(self, name):
        # Returns None when *name* was never written.
        return self.data.get(name)
    def _write(self, name, date):
        self.data[name] = date
    def _delete(self, name):
        # Raises KeyError when *name* is absent.
        del self.data[name]
class TestFeeds(unittest.IsolatedAsyncioTestCase):
    """Unit tests for Feed construction, alert dispatch and date bookkeeping."""
    def test_setup(self):
        """
        In which we test that a feed was created properly.
        """
        config = Config()
        feed = Feed(config, MockStorage(), group, testdata['name'], testdata['url'])
        # test the basics
        self.assertEqual(feed.name, testdata['name'])
        self.assertEqual(feed.url, testdata['url'])
        # test this stuff got merged from the group
        self.assertIn('log', feed.outputs)
        self.assertIn('enabled', feed.outputs['log'])
        self.assertTrue(feed.outputs['log']['enabled'])
        # test we can get save a date and get it back
        date = pendulum.now('UTC')
        feed.storage.save_date(feed.feed, date)
        self.assertEqual(date, feed.previous_date())
    async def test_alerts_enabled(self):
        """
        In which we make sure enabled alerts are called.
        """
        config = Config({
            'outputs': {
                'email': {
                    'enabled': True,
                    'from': 'monkey@jwplayer.test',
                    'to': 'monkey@jwplayer.test',
                },
                'log': {
                    'enabled': True,
                },
                'slack': {
                    'enabled': True,
                    'token': 'monkeys',
                    'channel': '#foo',
                },
            },
        })
        feed = Feed(config, MockStorage(), group, testdata['name'], testdata['url'])
        self.assertTrue(feed.outputs.get('email.enabled'))
        self.assertTrue(feed.outputs.get('log.enabled'))
        self.assertTrue(feed.outputs.get('slack.enabled'))
        # NOTE(review): new=AsyncMock patches the module with the AsyncMock
        # *class* itself (attributes are then set on the class, not an
        # instance) — works, but confirm an instance wasn't intended.
        with patch('rssalertbot.alerts', new=AsyncMock) as alerts:
            alerts.alert_email = MagicMock()
            alerts.alert_log = MagicMock()
            alerts.alert_slack = AsyncMock()
            await feed.alert(self.make_entry())
            alerts.alert_email.assert_called()
            alerts.alert_log.assert_called()
            alerts.alert_slack.assert_awaited()
    def test_alert_slack_missing_values(self):
        # Constructing a Feed with slack enabled but no channel/token must
        # log both configuration errors.
        config = Config({
            'outputs': {
                'slack': {
                    'enabled': True,
                }
            }
        })
        mygroup = copy.deepcopy(group)
        with testfixtures.LogCapture() as capture:
            Feed(
                cfg = config,
                storage = MockStorage(),
                group = mygroup,
                name = testdata['name'],
                url = testdata['url'],
            )
        capture.check_present(
            ('rssalertbot.feed', 'ERROR', 'Slack enabled but slack.channel not set!'),
            ('rssalertbot.feed', 'ERROR', 'Slack enabled but slack.token not set!'),
            order_matters=False,
        )
    async def test_alerts_disabled(self):
        """
        In which we make sure disabled alerts are NOT called.
        """
        config = Config({
            'outputs': {
                'email': {
                    'enabled': False,
                    'from': 'monkey@jwplayer.test',
                    'to': 'monkey@jwplayer.test',
                },
                'log': {
                    'enabled': False,
                },
                'slack': {
                    'enabled': False,
                },
            },
        })
        feed = Feed(config, MockStorage(), group, testdata['name'], testdata['url'])
        self.assertFalse(feed.outputs.get('email.enabled'))
        self.assertFalse(feed.outputs.get('slack.enabled'))
        # the group overrides this one!
        self.assertTrue(feed.outputs.get('log.enabled'))
        with patch('rssalertbot.alerts') as alerts:
            alerts.alert_email = MagicMock()
            alerts.alert_log = MagicMock()
            alerts.alert_slack = AsyncMock()
            await feed.alert(self.make_entry())
            alerts.alert_email.assert_not_called()
            alerts.alert_slack.assert_not_awaited()
            # again, the group overrides this!
            alerts.alert_log.assert_called()
    def test_previous_date_recent(self):
        # A stored date within the last day is returned unchanged.
        stored_date = pendulum.now('UTC').subtract(minutes=10)
        storage = MockStorage()
        storage.data = {f"{group.name}-{testdata['name']}": stored_date}
        feed = Feed(Config(), storage, group, testdata['name'], testdata['url'])
        self.assertEqual(stored_date, feed.previous_date())
    def test_previous_date_old(self):
        # Dates older than a day are clamped to yesterday — presumably to
        # bound the scan window; confirm in Feed.previous_date().
        stored_date = pendulum.now('UTC').subtract(days=10)
        storage = MockStorage()
        storage.data = {f"{group.name}-{testdata['name']}": stored_date}
        feed = Feed(Config(), storage, group, testdata['name'], testdata['url'])
        self.assertEqual(pendulum.yesterday('UTC'), feed.previous_date())
    def test_previous_date_not_found(self):
        # No stored date at all also falls back to yesterday.
        storage = MockStorage()
        storage.data = {f"{group.name}-{testdata['name']}": None}
        feed = Feed(Config(), storage, group, testdata['name'], testdata['url'])
        self.assertEqual(pendulum.yesterday('UTC'), feed.previous_date())
    def make_entry(self, title="test entry", description="test description", date=None):
        """
        Make a test entry
        """
        date = date or pendulum.now('UTC')
        return Box({
            'title': title,
            'description': description,
            'published': date,
            'datestring': date.to_rfc1123_string(),
        })
class TestFeedProcessing(unittest.IsolatedAsyncioTestCase):
    """End-to-end tests of Feed.process() against canned RSS documents."""
    def setUp(self):
        self.now = pendulum.now('UTC')
        self.publish_date = self.now
        self.event_title = "Incident: Things are not going well!"
        self.event_description = "Issue summary: OH NO"
        self.storage = MockStorage()
        # Wrap the storage event helpers so calls can be asserted while
        # still hitting the real (in-memory) implementation.
        self.storage.save_event = MagicMock(wraps=self.storage.save_event)
        self.storage.delete_event = MagicMock(wraps=self.storage.delete_event)
        self.feed = Feed(Config(), self.storage, group, testdata['name'], testdata['url'])
        self.feed.alert = AsyncMock()
    def storage_name(self):
        # Mirrors the feed's per-event storage key: md5 of title+description.
        event_id = md5((self.event_title + self.event_description).encode()).hexdigest()
        return f"{self.feed.feed}-{event_id}"
    async def process_feed(self, rss=None):
        # Run Feed.process() against *rss* (or a default single-item feed
        # published at self.publish_date) with network fetching mocked out.
        if not rss:
            rss = f"""
            <rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
                <channel>
                <title>Fake System Status</title>
                <link>https://status.fake.system/</link>
                <description>Fake System Status Notices</description>
                <lastBuildDate>{self.publish_date.to_rss_string()}</lastBuildDate>
                <item>
                    <title>{self.event_title}</title>
                    <description>{self.event_description}</description>
                    <pubDate>{self.publish_date.to_rss_string()}</pubDate>
                    <link>https://status.fake.system/abcdef</link>
                    <guid>https://status.fake.system/abcdef</guid>
                </item>
                </channel>
            </rss>
            """
        self.feed.fetch_and_parse = AsyncMock(return_value=feedparser.parse(rss).entries)
        await self.feed.process()
    def assert_timestamps_equal(self, stamp1, stamp2):
        # Compare to one-second resolution rather than exact instants.
        self.assertEqual(0, stamp1.diff(stamp2).in_seconds())
    async def test_process_empty(self):
        # A feed with no items must neither alert nor touch storage.
        await self.process_feed('<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0"></rss>')
        self.feed.storage.save_event.assert_not_called()
        self.feed.alert.assert_not_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_message_too_old(self):
        # Items published before the scan window are ignored entirely.
        self.publish_date = self.publish_date.subtract(days=5)
        await self.process_feed()
        self.storage.save_event.assert_not_called()
        self.feed.alert.assert_not_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_recent(self):
        # A recent item alerts and records the item's publish date.
        self.publish_date = self.publish_date.subtract(minutes=5)
        await self.process_feed()
        self.feed.storage.save_event.assert_not_called()
        self.feed.alert.assert_called()
        self.feed.storage.delete_event.assert_not_called()
        self.assert_timestamps_equal(self.publish_date, self.feed.storage.data[self.feed.feed])
    async def test_process_new_future_event(self):
        # A future-dated item alerts and is tracked under its event key.
        self.publish_date = self.publish_date.add(minutes=10)
        await self.process_feed()
        self.assert_timestamps_equal(self.now, self.storage.data[self.storage_name()])
        self.feed.alert.assert_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_seen_future_event_realert(self):
        # A future event last alerted long ago (2 days) re-alerts.
        self.publish_date = self.publish_date.add(minutes=10)
        storage_name = self.storage_name()
        self.storage.data[storage_name] = self.now.subtract(days=2)
        await self.process_feed()
        self.assert_timestamps_equal(self.now, self.storage.data[storage_name])
        self.feed.alert.assert_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_seen_future_event_custom_realert(self):
        # With re_alert configured to 1 (hour — confirm units in Feed),
        # an event last alerted 2 hours ago re-alerts.
        self.publish_date = self.publish_date.add(minutes=10)
        self.feed.cfg["re_alert"] = 1
        storage_name = self.storage_name()
        self.storage.data[storage_name] = self.now.subtract(hours=2)
        await self.process_feed()
        self.assert_timestamps_equal(self.now, self.storage.data[storage_name])
        self.feed.alert.assert_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_seen_future_event(self):
        # A future event alerted only 5 minutes ago does NOT re-alert.
        self.publish_date = self.publish_date.add(minutes=10)
        self.storage.data[self.storage_name()] = self.now.subtract(minutes=5)
        await self.process_feed()
        self.feed.storage.save_event.assert_not_called()
        self.feed.alert.assert_not_called()
        self.storage.delete_event.assert_not_called()
        self.assertNotIn(self.feed.feed, self.storage.data)
    async def test_process_seen_event_from_past(self):
        # Once a tracked event's publish date is in the past, its event
        # record is cleaned up and the feed date is recorded instead.
        self.publish_date = self.publish_date.subtract(minutes=10)
        storage_name = self.storage_name()
        self.storage.data[storage_name] = self.now.subtract(hours=5)
        await self.process_feed()
        self.storage.save_event.assert_not_called()
        self.feed.alert.assert_called()
        self.assertNotIn(storage_name, self.storage.data)
        self.assert_timestamps_equal(self.publish_date, self.storage.data[self.feed.feed])
    async def test_process_multiple_messages(self):
        # Mixed feed: an already-seen future item (no re-alert), a recent
        # item and an older item — expect exactly two alerts.
        self.publish_date = self.publish_date.subtract(minutes=10)
        future_event_title = "Notice: The future is coming"
        future_event_description = "Issue summary: something's gonna happen but I forget what"
        rss = f"""
        <rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
            <channel>
            <title>Fake System Status</title>
            <link>https://status.fake.thing/</link>
            <description>Fake System Status Notices</description>
            <lastBuildDate>{self.publish_date.to_rss_string()}</lastBuildDate>
            <item>
                <title>{future_event_title}</title>
                <description>{future_event_description}</description>
                <pubDate>{self.publish_date.add(days=2).to_rss_string()}</pubDate>
                <link>https://status.fake.thing/zyxwvu</link>
                <guid>https://status.fake.thing/zyxwvu</guid>
            </item>
            <item>
                <title>Recovery: Things are okay I guess</title>
                <description>Issue summary: yeah it's alright now</description>
                <pubDate>{self.publish_date.to_rss_string()}</pubDate>
                <link>https://status.fake.thing/ghijkl</link>
                <guid>https://status.fake.thing/ghijkl</guid>
            </item>
            <item>
                <title>{self.event_title}</title>
                <description>{self.event_description}</description>
                <pubDate>{self.publish_date.subtract(minutes=10).to_rss_string()}</pubDate>
                <link>https://status.fake.thing/abcdef</link>
                <guid>https://status.fake.thing/abcdef</guid>
            </item>
            </channel>
        </rss>
        """
        self.event_title = future_event_title
        self.event_description = future_event_description
        self.storage.data[self.storage_name()] = self.now.subtract(minutes=5)
        await self.process_feed(rss)
        self.storage.save_event.assert_not_called()
        self.assertEqual(2, self.feed.alert.call_count)
        self.storage.delete_event.assert_not_called()
        self.assert_timestamps_equal(self.publish_date, self.storage.data[self.feed.feed])
| {
"content_hash": "add257d503d02db89ff7720287a6b34e",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 104,
"avg_line_length": 37.31496062992126,
"alnum_prop": 0.5754378560877822,
"repo_name": "jwplayer/rssalertbot",
"id": "13a93899efb2dfc29086cbdcf7bf045e9215182e",
"size": "14218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_feeds.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1037"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "57574"
},
{
"name": "Shell",
"bytes": "33"
}
],
"symlink_target": ""
} |
import sys, getopt, requests, json
def printusertext(p_message):
    """Print a user-facing line, prefixed with '@' so that scripts
    chaining on this one's output can skip it."""
    print('@ {}'.format(p_message))
def printhelp():
    """Print usage instructions via printusertext()."""
    usage_lines = (
        "This is a script that prints a list of an organization's devices to sdtout or a file.",
        '',
        'Usage:',
        'python invlist.py -k <API key> -o <org name> [-f <file path>]',
        '',
        'Use double quotes ("") in Windows to pass arguments containing spaces. Names are case-sensitive.',
    )
    for line in usage_lines:
        printusertext(line)
def getorgid(p_apikey, p_orgname):
    """Return the organization id matching *p_orgname*, or 'null' on failure."""
    r = requests.get(
        'https://dashboard.meraki.com/api/v0/organizations',
        headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return 'null'
    for record in r.json():
        if record['name'] == p_orgname:
            return record['id']
    return 'null'
def getshardurl(p_apikey, p_orgid):
    """Return the Meraki API host for the organization.

    Patched to always return the generic API host; the arguments are
    accepted only for signature compatibility with callers.
    """
    return 'api.meraki.com'
def getnwlist(p_apikey, p_shardurl, p_orgid):
    """Return all networks in an organization.

    On HTTP failure, returns a single record with 'null' name and id.
    """
    r = requests.get(
        'https://%s/api/v0/organizations/%s/networks' % (p_shardurl, p_orgid),
        headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return [{'name': 'null', 'id': 'null'}]
    return r.json()
def getdevicelist(p_apikey, p_shardurl, p_nwid):
    """Return all devices in a network.

    On HTTP failure, returns a single record with 'null' serial and model.
    """
    r = requests.get(
        'https://%s/api/v0/networks/%s/devices' % (p_shardurl, p_nwid),
        headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return [{'serial': 'null', 'model': 'null'}]
    return r.json()
def getnwvlanips(p_apikey, p_shardurl, p_nwid):
    """Return the MX VLANs configured on a network.

    On HTTP failure, returns a single record with a 'null' id.
    """
    r = requests.get(
        'https://%s/api/v0/networks/%s/vlans' % (p_shardurl, p_nwid),
        headers={'X-Cisco-Meraki-API-Key': p_apikey, 'Content-Type': 'application/json'})
    if r.status_code != requests.codes.ok:
        return [{'id': 'null'}]
    return r.json()
def main(argv):
    """Parse CLI options, fetch every device in the org and emit one
    CSV-ish line per device (serial, model, then any known IPs) to
    stdout or to the file given with -f."""
    #get command line arguments
    arg_apikey = 'null'
    arg_orgname = 'null'
    arg_filepath = 'null'
    try:
        opts, args = getopt.getopt(argv, 'hk:o:f:')
    except getopt.GetoptError:
        printhelp()
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            printhelp()
            sys.exit()
        elif opt == '-k':
            arg_apikey = arg
        elif opt == '-o':
            arg_orgname = arg
        elif opt == '-f':
            arg_filepath = arg
    # 'null' sentinels mean the required -k/-o options were not supplied
    if arg_apikey == 'null' or arg_orgname == 'null':
        printhelp()
        sys.exit(2)
    #get organization id corresponding to org name provided by user
    orgid = getorgid(arg_apikey, arg_orgname)
    if orgid == 'null':
        printusertext('ERROR: Fetching organization failed')
        sys.exit(2)
    #get shard URL where Org is stored
    shardurl = getshardurl(arg_apikey, orgid)
    if shardurl == 'null':
        printusertext('ERROR: Fetching Meraki cloud shard URL failed')
        sys.exit(2)
    #get network list for fetched org id
    nwlist = getnwlist(arg_apikey, shardurl, orgid)
    if nwlist[0]['id'] == 'null':
        printusertext('ERROR: Fetching network list failed')
        sys.exit(2)
    #if user selected to print in file, set flag & open for writing
    filemode = False
    if arg_filepath != 'null':
        try:
            f = open(arg_filepath, 'w')
        except:
            printusertext('ERROR: Unable to open output file for writing')
            sys.exit(2)
        filemode = True
    devicelist = []
    recordstring = []
    vlanips = []
    for nwrecord in nwlist:
        #get devices' list
        devicelist = getdevicelist(arg_apikey, shardurl, nwrecord['id'])
        #append list to file or stdout
        for i in range (0, len(devicelist)):
            #START: MODIFY THESE LINES TO CHANGE OUTPUT FORMAT
            #create string to be printed if filemode, a '\n' will be added later
            #use try-except so that code does not crash if lanIp, wan1Ip or wan2Ip are missing
            recordstring = devicelist[i]['serial'] + ',' + devicelist[i]['model']
            # IPs shorter than 5 chars (e.g. empty/None) are skipped
            try:
                if (len(devicelist[i]['lanIp']) > 4):
                    recordstring += ',' + devicelist[i]['lanIp']
            except:
                pass
            try:
                if (len(devicelist[i]['wan1Ip']) > 4):
                    recordstring += ',' + devicelist[i]['wan1Ip']
            except:
                pass
            try:
                if (len(devicelist[i]['wan2Ip']) > 4):
                    recordstring += ',' + devicelist[i]['wan2Ip']
            except:
                pass
            #if the device is an MX or Z1, LAN interface IPs will be listed under network VLANs
            if (devicelist[i]['model'].startswith('MX') or devicelist[i]['model'].startswith('Z1')):
                vlanips = getnwvlanips(arg_apikey, shardurl, nwrecord['id'])
                if vlanips[0]['id'] != 'null':
                    for j in range (0, len(vlanips)):
                        recordstring = recordstring + ',' + vlanips[j]['applianceIp']
            #END: MODIFY THESE LINES TO CHANGE OUTPUT FORMAT
            #print record to file or stdout
            if filemode:
                recordstring += '\n'
                try:
                    f.write(recordstring)
                except:
                    printusertext('ERROR: Unable to write device info to file')
                    sys.exit(2)
            else:
                print(recordstring)
"content_hash": "f7a2d0984d2c1bcae998f9fd5d22c0b8",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 170,
"avg_line_length": 30.844444444444445,
"alnum_prop": 0.6431916426512968,
"repo_name": "meraki/automation-scripts",
"id": "536ea7f5a1a7cfc075faf2e46d6d7e3e24e616a4",
"size": "6241",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "listip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2448"
},
{
"name": "Python",
"bytes": "1121262"
},
{
"name": "Ruby",
"bytes": "857"
}
],
"symlink_target": ""
} |
import unittest
from scripts.migrate_to_whatsapp_templates.prebirth3 import Prebirth3Migration
class Testprebirth3(unittest.TestCase):
    """Tests for the prebirth-3 WhatsApp template migration."""
    def setUp(self):
        self.prebirth3 = Prebirth3Migration()
    def test_sequence_number_to_weeks(self):
        """
        Given a certain sequence number for the prebirth 3 messageset, it should return
        the correct number of weeks pregnant
        """
        # Mapping is consistent with three messages per week starting at
        # week 36 (sequence 1-3 -> 36, ..., 14-15 -> 40).
        self.assertEqual(self.prebirth3.sequence_number_to_weeks(1), 36)
        self.assertEqual(self.prebirth3.sequence_number_to_weeks(2), 36)
        self.assertEqual(self.prebirth3.sequence_number_to_weeks(3), 36)
        self.assertEqual(self.prebirth3.sequence_number_to_weeks(14), 40)
        self.assertEqual(self.prebirth3.sequence_number_to_weeks(15), 40)
    def test_get_template_variables(self):
        # The template variables are the computed week count (as a string)
        # followed by the message text.
        message = {
            "id": "1",
            "messageset": "2",
            "sequence_number": "3",
            "lang": "zul_ZA",
            "text_content": "test",
            "binary_content": "",
            "metadata": "{}",
        }
        self.assertEqual(self.prebirth3.get_template_variables(message), ["36", "test"])
| {
"content_hash": "194629cc8817e128cfa1af9cdb4b1bf7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 37.354838709677416,
"alnum_prop": 0.6260794473229706,
"repo_name": "praekeltfoundation/ndoh-hub",
"id": "bdc451481f1a8c8cc9ac9464d916c01dfd163e77",
"size": "1158",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "scripts/migrate_to_whatsapp_templates/tests/test_prebirth3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "450"
},
{
"name": "HTML",
"bytes": "2200"
},
{
"name": "Python",
"bytes": "957306"
},
{
"name": "Shell",
"bytes": "2796"
}
],
"symlink_target": ""
} |
from . import QtWidgets, QtWebEngineWidgets
from ..config import get_user_or_default_config_path
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
import lxml.html
import lxml.etree
class SourceDialog(QtWidgets.QDialog):
    '''Dialog displaying HTML source of the view.'''
    def __init__(self, html, title, parent=None):
        # *html* is re-parsed with lxml so it can be pretty-printed, then
        # syntax-highlighted with pygments before being shown in a web view.
        super(SourceDialog, self).__init__(parent)
        self.text_edit = QtWebEngineWidgets.QWebEngineView()
        r = lxml.html.fromstring(html)
        html = lxml.etree.tostring(r, encoding='unicode', pretty_print=True)
        html = highlight(html, HtmlLexer(), HtmlFormatter())
        # Inline the stylesheet so the highlighted markup is styled inside
        # the web view as well as on the dialog itself.
        html += "<style>" + SourceDialog.get_stylesheet() + "</style>"
        self.text_edit.setHtml(html)
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.text_edit)
        self.setLayout(layout)
        self.setWindowTitle(title)
        self.setStyleSheet(SourceDialog.get_stylesheet())
        self.adjustSize()
    @classmethod
    def get_stylesheet(cls):
        # Lazily load source_view.css once and cache it on the class, so
        # repeated dialogs don't re-read the file.
        if not hasattr(cls, "_stylesheet"):
            path = get_user_or_default_config_path("source_view.css")
            with open(path, "r") as f:
                cls._stylesheet = f.read()
        return cls._stylesheet
"content_hash": "b8ce4e5c240ecca57a3dc64d7929bc3b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 34.1578947368421,
"alnum_prop": 0.662557781201849,
"repo_name": "janpipek/hlava",
"id": "a058d7d003fac3233004d4585c01f51ab2218675",
"size": "1298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hlava/qtui/source_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9293"
},
{
"name": "Makefile",
"bytes": "108"
},
{
"name": "Python",
"bytes": "127427"
}
],
"symlink_target": ""
} |
"""
Olaf
~~~~~~~~~~~~~
command line tool to help you manage multiple requirements files.
"""
from __future__ import absolute_import
import glob
import click
import subprocess
import collections
@click.command()
@click.option('--prefix', '-p', default='requirements',
              help='Match files on {prefix}*.txt')
@click.option('--write', '-w', is_flag=True,
              help='Use if you want to commit the changes to your files.')
@click.argument('cmd', default='freeze', required=True)
def main(cmd, prefix, write):
    """Entry point: dispatch CMD ('freeze' or 'dupes') to its handler.

    Unknown commands are silently ignored, matching the original flow.
    """
    handlers = {'freeze': freeze, 'dupes': dupes}
    handler = handlers.get(cmd)
    if handler is not None:
        handler(prefix, write)
def freeze(prefix, write):
    """Reconcile installed packages (pip freeze) with the requirements files.

    Resolves version mismatches, assigns unlisted installed packages,
    de-duplicates across files, writes/prints the result, then reports
    packages required but not installed.
    """
    installed = pipfreeze()
    files = create_files_store(prefix)
    missing_from_reqs = []
    match_versions(installed, files, missing_from_reqs)
    add_installed(files, missing_from_reqs)
    find_duplicates(files)
    do_write(files, write)
    check_uninstalled(installed, files)
def get_reqs_files(prefix):
    """Return paths of requirements files matching ``{prefix}*.txt``."""
    return glob.glob(prefix + '*.txt')
def create_files_store(prefix):
    """Build the working store: one record per requirements file.

    Each record carries the file name, its parsed packages, and empty
    'updated'/'removed' change-tracking lists.
    """
    return [
        {'name': name, 'packages': jam(name), 'updated': [], 'removed': []}
        for name in get_reqs_files(prefix)
    ]
def dupes(prefix, write):
    """Find and resolve packages duplicated across requirements files."""
    store = create_files_store(prefix)
    find_duplicates(store)
    do_write(store, write)
def find_duplicates(files):
    """Interactively resolve packages listed in more than one file.

    The first entry of *files* is the primary; for every package it shares
    with another file the user may keep both copies or drop one.  Dropped
    packages are appended to the owning record's 'updated' and 'removed'
    lists.  Assumes *files* is non-empty.
    """
    primary = files[0]
    others = files[1:len(files)]
    # Iterate over a copy: the prompts below may delete keys from the dict.
    for package in primary['packages'].copy():
        for other in others:
            otherpacks = other['packages']
            try:
                # Membership probe only; the value itself is unused.
                dupe = otherpacks[package]
            except KeyError:
                pass
            else:
                # NOTE(review): if choice 2 removes the package from the
                # primary and a *third* file also contains it, the lookup
                # below will KeyError on the next inner iteration — confirm.
                this = {
                    'file': primary['name'],
                    'package': package,
                    'version': primary['packages'][package]
                }
                that = {
                    'file': other['name'],
                    'package': package,
                    'version': other['packages'][package]
                }
                click.echo(
                    "Found {} in {} and {}".format(
                        package, this['file'], that['file']))
                value = click.prompt(
                    """[0]Keep both\n[1] Keep {} in {}\n[2] Keep {} in {}\n""".format(
                        this['version'],
                        this['file'],
                        that['version'],
                        that['file']),
                    type=int
                )
                if value == 1:
                    del otherpacks[package]
                    other['updated'].append(package)
                    other['removed'].append(package)
                elif value == 2:
                    del primary['packages'][package]
                    primary['updated'].append(package)
                    primary['removed'].append(package)
def write_updates(files):
    """Persist each requirements file that has pending updates."""
    for req in files:
        if req['updated']:
            rewrite(req)
        else:
            click.echo('No updates in {}'.format(req['name']))
def print_updates(files):
    """Echo every requirements file to stdout without writing anything."""
    for record in files:
        output(record)
def match_versions(frozen, files, unreqed):
    """Reconcile installed package versions against the requirements files.

    frozen  -- (package, version) pairs from pip freeze.
    files   -- requirements-file records; pinned versions are updated in
               place when the user chooses to.
    unreqed -- output list; receives a {package: version} dict for every
               installed package found in no requirements file.
    """
    for package, version in frozen:
        found = False
        for req in files:
            if package in req['packages']:
                if req['packages'][package] != version:
                    click.echo(
                        '{} specifies version {} of {}, the installed version is {}'.format(
                            req['name'], req['packages'][package], package, version))
                    value = click.prompt(
                        """[1] Update {}\n[2] Keep existing value\n""".format(req['name']),
                        type=int
                    )
                    if value == 1:
                        req['packages'][package] = version
                        req['updated'].append(package)
                    elif value == 2:
                        pass
                found = True
        if found is False:
            unreqed.append({
                package: version
            })
def check_uninstalled(frozen, files):
    """Report packages listed in requirements files but absent from pip freeze.

    Returns the (possibly empty) list of such package names; a warning is
    echoed in red when any are found.
    """
    installed = [str(name) for name, _ in frozen]
    required = [
        str(name)
        for record in files
        for name in record['packages']
    ]
    uninstalled = list(set(required) - set(installed))
    if uninstalled:
        click.echo(
            click.style(
                "The following packages are in your requirements but not installed.",
                fg='red'))
        click.echo(click.style('\n'.join(uninstalled), fg='red'))
    return uninstalled
def add_installed(files, unreqed):
    """Interactively assign installed-but-unlisted packages to a file.

    unreqed -- list of {package: version} dicts for installed packages
    that appear in no requirements file.  For each one the user picks a
    target file (or 0 to ignore); chosen packages are added to that
    file's record and marked as updated.
    """
    # Deduplicate the single-entry dicts (dicts are unhashable, so
    # compare via their item tuples).
    unique = [dict(y) for y in set(tuple(x.items()) for x in unreqed)]
    for package_d in unique:
        for package, version in package_d.items():
            click.echo(
                u'{} is installed but not in any requirements file'.format(
                    package))
            pstr = '[0] Ignore\n'
            for index, rf in enumerate(files):
                pstr += '[{}] Add to {}\n'.format(index + 1, rf['name'])
            value = click.prompt(pstr, type=int)
            if value == 0:
                # FIX: the original fell through to files[value - 1],
                # i.e. files[-1], silently adding "ignored" packages to
                # the LAST requirements file.
                continue
            if not 1 <= value <= len(files):
                # FIX: negative answers used to index from the end of the
                # list instead of being rejected.
                click.echo('Not a valid selection soz.')
                continue
            req_file = files[value - 1]
            req_file['packages'][package] = version
            req_file['updated'].append(package)
def pipfreeze():
    """Return (package, version) tuples parsed from ``pip freeze`` output.

    Editable installs ('-e ...') keep their whole line as the package name
    with an empty version; '## FIXME:' marker lines are skipped.  Returns
    an empty list (after echoing the error) if pip fails.
    """
    frozen = []
    try:
        listing = subprocess.check_output(["pip", "freeze"]).decode("utf-8")
    except subprocess.CalledProcessError as e:
        click.echo('Error with pip freeze: {}'.format(e))
        return frozen
    for raw in listing.splitlines():
        line = str(raw)
        if line.startswith(u'-e '):
            frozen.append((line.strip(), ''))
        elif line.startswith(u'## FIXME:'):
            continue
        elif u'==' in line:
            name, version = line.strip().split(u'==')
            frozen.append((name, version))
    return frozen
def jam(filename):
    """Parse a requirements file into a {package: version} dict.

    Editable installs ('-e ...') keep their full line as the key with an
    empty version; unpinned lines map to 'latest'.  If the file cannot be
    opened, a message is echoed and an empty dict is returned.
    """
    packages = {}
    try:
        with open(filename, 'r') as infile:
            infile.seek(0)
            for line in infile.readlines():
                if line.startswith(u'-e '):
                    packages[line.strip()] = ''
                elif '==' in line:
                    package, version = line.strip().split(u'==')
                    packages[package] = version
                else:
                    packages[line.strip()] = 'latest'
    except IOError:
        # FIX: the original called infile.close() after this handler even
        # though `infile` is unbound when open() fails, crashing with
        # UnboundLocalError; the `with` statement now handles closing.
        click.echo('File {} not found.'.format(filename))
    return packages
def rewrite(req):
    """Rewrite a requirements file from its in-memory package mapping.

    req: dict with ``name`` (path), a ``packages`` mapping, and ``updated`` /
    ``removed`` package-name lists.  Packages are written sorted by name via
    ``lineout``; a summary of updates and removals is echoed afterwards.
    """
    try:
        with open(req['name'], 'w') as outfile:
            outfile.seek(0)
            outfile.truncate()
            packages = collections.OrderedDict(
                sorted(req['packages'].items(), key=lambda t: str(t[0]))
            )
            for k, v in packages.items():
                outfile.write('{}\n'.format(lineout(str(k), str(v))))
    except IOError:
        click.echo('File {} not found.'.format(req['name']))
    # NOTE: the old trailing outfile.close() raised NameError when open()
    # failed (outfile unbound) and was redundant otherwise — 'with' already
    # closed the file.  It has been removed.
    click.echo('Updated {} with packages \n\t{}'.format(
        req['name'], '\n\t'.join([str(_) for _ in req['updated']])))
    for item in req['removed']:
        click.echo(click.style('# Removed package {}'.format(item), fg='yellow'))
def output(req):
    """Pretty-print a requirements file to the terminal.

    Packages are listed sorted by name; updated ones are shown in green and
    removed ones as yellow '# Removed package ...' comment lines.
    """
    click.echo('\n')
    click.echo('#' * 80)
    click.echo('# {}'.format(req['name']))
    click.echo('#' * 80)
    ordered = collections.OrderedDict(
        sorted(req['packages'].items(), key=lambda t: str(t[0]))
    )
    for name, version in ordered.items():
        line = lineout(str(name), str(version))
        if name in req['updated']:
            click.echo(click.style(line, fg='green'))
        else:
            click.echo(line)
    for removed in req['removed']:
        click.echo(click.style('# Removed package {}'.format(removed), fg='yellow'))
    click.echo('\n')
def do_write(files, write):
    """Persist updates when *write* is exactly True, otherwise just print them."""
    handler = write_updates if write is True else print_updates
    handler(files)
def lineout(package, version):
    """Format one requirements line: 'pkg==ver', or the bare spec for
    editable ('-e ...') and unpinned ('latest') packages."""
    if version == u'latest' or package.startswith(u'-e '):
        return package
    return '{}=={}'.format(package, version)
| {
"content_hash": "63e4565bd2ff4ae1803396af6cac88fb",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 92,
"avg_line_length": 30.385159010600706,
"alnum_prop": 0.502965461100128,
"repo_name": "hactar-is/olaf",
"id": "1e293d8a25ca88a374e71f4361432fccc8b88716",
"size": "8624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "olaf/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10542"
}
],
"symlink_target": ""
} |
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# good to go!
def main():
    """Scrape Eucherius from The Latin Library into the texts.db SQLite table.

    Each numbered paragraph becomes one row; verse numbers come from leading
    'N.' markers, and paragraphs containing two verses ('2. ... 3. ...') are
    split into two rows.  Existing Eucherius rows are deleted first.
    """
    # The collection URL below.
    collURL = 'http://www.thelatinlibrary.com/eucherius.html'
    collOpen = urllib.request.urlopen(collURL)
    collSOUP = BeautifulSoup(collOpen, 'html5lib')
    author = collSOUP.title.string.split(':')[0].strip()
    colltitle = collSOUP.title.string.split(':')[1].strip()
    date = "no date found"
    textsURL = [collURL]
    with sqlite3.connect('texts.db') as db:
        c = db.cursor()
        c.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        # Re-running the scraper replaces any rows from a previous import.
        c.execute("DELETE FROM texts WHERE author = 'Eucherius'")
        for url in textsURL:
            openurl = urllib.request.urlopen(url)
            textsoup = BeautifulSoup(openurl, 'html5lib')
            title = colltitle
            getp = textsoup.find_all('p')
            verse = 0
            chapter = -1  # single-text page: chapter is never advanced
            for p in getp:
                # Skip navigation/decoration paragraphs.
                try:
                    if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallborder', 'margin',
                                                 'internal_navigation']:  # these are not part of the main t
                        continue
                except Exception:
                    # Was a bare 'except:'; paragraphs without a class
                    # attribute simply fall through.
                    pass
                verses = []
                text = p.get_text()
                text = text.strip()
                if p.find('b') is not None:
                    # Bold paragraphs are headings, not body text.
                    continue
                # Regex literals are raw strings now: '2\.' etc. relied on
                # invalid escape sequences (SyntaxWarning on modern Python).
                if re.match(r'2\.', text):
                    # handle 2 verses in same <p>
                    verse = 1
                    text = text.replace("2.", "").strip()
                    lines = re.split(r"3\.", text)
                    for l in lines:
                        if l is None or l == '' or l.isspace():
                            continue
                        if l.startswith('Christian'):
                            continue
                        verse += 1
                        c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                                  (None, colltitle, title, 'Latin', author, date, chapter,
                                   verse, l.strip(), url, 'prose'))
                    continue
                elif re.match(r'[0-9]+\.', text):
                    # get verse numbers
                    verse = text.split(".")[0].strip()
                    text = text.replace(verse + ".", "").strip()
                verses.append(text)
                for v in verses:
                    if v.startswith('Christian'):
                        continue
                    if v is None or v == '' or v.isspace():
                        continue
                    c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                              (None, colltitle, title, 'Latin', author, date, chapter,
                               verse, v.strip(), url, 'prose'))
# Allow running the scraper directly as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "fd431bd286b46c90cc9b161d47323655",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 110,
"avg_line_length": 36.38636363636363,
"alnum_prop": 0.4540911930043723,
"repo_name": "oudalab/phyllo",
"id": "95e6c595edfecb52036619172e312dfdc83b8831",
"size": "3202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phyllo/extractors/eucheriusDB.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "919"
},
{
"name": "HTML",
"bytes": "3428"
},
{
"name": "Python",
"bytes": "1253920"
},
{
"name": "Shell",
"bytes": "1077"
}
],
"symlink_target": ""
} |
"""
Basic Encoding Rules (BER) for ASN.1
"""
from scapy.error import warning
from scapy.utils import inet_aton,inet_ntoa
from asn1 import ASN1_Decoding_Error,ASN1_Encoding_Error,ASN1_BadTag_Decoding_Error,ASN1_Codecs,ASN1_Class_UNIVERSAL,ASN1_Error,ASN1_DECODING_ERROR,ASN1_BADTAG
##################
## BER encoding ##
##################
#####[ BER tools ]#####
class BER_Exception(Exception):
    """Generic error raised by the BER codec helpers."""
    pass
class BER_Encoding_Error(ASN1_Encoding_Error):
    """BER encoding failure; keeps the partially encoded data and remainder."""
    def __init__(self, msg, encoded=None, remaining=None):
        Exception.__init__(self, msg)
        self.remaining = remaining
        self.encoded = encoded
    def __str__(self):
        # Append the already-encoded part and the unprocessed remainder to
        # the base message to ease debugging.
        s = Exception.__str__(self)
        if isinstance(self.encoded, BERcodec_Object):
            s+="\n### Already encoded ###\n%s" % self.encoded.strshow()
        else:
            s+="\n### Already encoded ###\n%r" % self.encoded
        s+="\n### Remaining ###\n%r" % self.remaining
        return s
class BER_Decoding_Error(ASN1_Decoding_Error):
    """BER decoding failure; keeps the partially decoded data and remainder."""
    def __init__(self, msg, decoded=None, remaining=None):
        Exception.__init__(self, msg)
        self.remaining = remaining
        self.decoded = decoded
    def __str__(self):
        # Append the already-decoded part and the unprocessed remainder to
        # the base message to ease debugging.
        s = Exception.__str__(self)
        if isinstance(self.decoded, BERcodec_Object):
            s+="\n### Already decoded ###\n%s" % self.decoded.strshow()
        else:
            s+="\n### Already decoded ###\n%r" % self.decoded
        s+="\n### Remaining ###\n%r" % self.remaining
        return s
class BER_BadTag_Decoding_Error(BER_Decoding_Error, ASN1_BadTag_Decoding_Error):
    """Raised when the leading tag octet does not match the expected codec."""
    pass
def BER_len_enc(l, size=0):
    """Encode length l in BER: short form when l <= 127, else long form.

    size forces a minimum number of length octets.  (Python 2 code: long
    literals and chr()-built byte strings.)
    """
    if l <= 127 and size==0:
        return chr(l)
    s = ""
    while l or size>0:
        s = chr(l&0xff)+s
        l >>= 8L
        size -= 1
    if len(s) > 127:
        # Long form can describe at most 127 length octets.
        raise BER_Exception("BER_len_enc: Length too long (%i) to be encoded [%r]" % (len(s),s))
    return chr(len(s)|0x80)+s
def BER_len_dec(s):
    """Decode a BER length from s; return (length, remaining_bytes)."""
    l = ord(s[0])
    if not l & 0x80:
        # Short form: the single octet is the length itself.
        return l,s[1:]
    # Long form: low 7 bits give the number of length octets that follow.
    l &= 0x7f
    if len(s) <= l:
        raise BER_Decoding_Error("BER_len_dec: Got %i bytes while expecting %i" % (len(s)-1, l),remaining=s)
    ll = 0L
    for c in s[1:l+1]:
        ll <<= 8L
        ll |= ord(c)
    return ll,s[l+1:]
def BER_num_enc(l, size=1):
    """Encode l in base-128 with continuation bits (OID sub-identifier form)."""
    x=[]
    while l or size>0:
        x.insert(0, l & 0x7f)
        if len(x) > 1:
            # Every octet except the last carries the 0x80 continuation bit.
            x[0] |= 0x80
        l >>= 7
        size -= 1
    return "".join([chr(k) for k in x])
def BER_num_dec(s):
    """Decode a base-128 number from s; return (value, remaining_bytes)."""
    x = 0
    for i in range(len(s)):
        c = ord(s[i])
        x <<= 7
        x |= c&0x7f
        if not c&0x80:
            # Continuation bit clear: this was the last octet.
            break
    if c&0x80:
        # Input exhausted while the continuation bit was still set.
        raise BER_Decoding_Error("BER_num_dec: unfinished number description", remaining=s)
    return x, s[i+1:]
#####[ BER classes ]#####
class BERcodec_metaclass(type):
    """Metaclass that registers every codec class against its ASN.1 tag.

    Each new codec class calls ``tag.register(codec, cls)`` so the tag's
    context can later dispatch decoding back to the class.
    """
    def __new__(cls, name, bases, dct):
        c = super(BERcodec_metaclass, cls).__new__(cls, name, bases, dct)
        try:
            c.tag.register(c.codec, c)
        except Exception:
            # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed at import time.
            warning("Error registering %r for %r" % (c.tag, c.codec))
        return c
class BERcodec_Object:
    """Root of the BER codec hierarchy; also the generic tag dispatcher."""
    __metaclass__ = BERcodec_metaclass
    codec = ASN1_Codecs.BER
    tag = ASN1_Class_UNIVERSAL.ANY
    @classmethod
    def asn1_object(cls, val):
        # Wrap a decoded value in the ASN.1 object class of this codec's tag.
        return cls.tag.asn1_object(val)
    @classmethod
    def check_string(cls, s):
        # Reject empty input early with a descriptive error.
        if not s:
            raise BER_Decoding_Error("%s: Got empty object while expecting tag %r" %
                                     (cls.__name__,cls.tag), remaining=s)
    @classmethod
    def check_type(cls, s):
        # Verify the first octet is this codec's tag; return the rest.
        cls.check_string(s)
        if cls.tag != ord(s[0]):
            raise BER_BadTag_Decoding_Error("%s: Got tag [%i/%#x] while expecting %r" %
                                            (cls.__name__, ord(s[0]), ord(s[0]),cls.tag), remaining=s)
        return s[1:]
    @classmethod
    def check_type_get_len(cls, s):
        # Validate tag, then decode and return the BER length.
        s2 = cls.check_type(s)
        if not s2:
            raise BER_Decoding_Error("%s: No bytes while expecting a length" %
                                     cls.__name__, remaining=s)
        return BER_len_dec(s2)
    @classmethod
    def check_type_check_len(cls, s):
        # Returns (length, content, rest) after tag and length validation.
        l,s3 = cls.check_type_get_len(s)
        if len(s3) < l:
            raise BER_Decoding_Error("%s: Got %i bytes while expecting %i" %
                                     (cls.__name__, len(s3), l), remaining=s)
        return l,s3[:l],s3[l:]
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        # Generic decode: look up the codec registered for the leading tag
        # octet in the current context and delegate to it.
        if context is None:
            context = cls.tag.context
        cls.check_string(s)
        p = ord(s[0])
        if p not in context:
            t = s
            if len(t) > 18:
                t = t[:15]+"..."
            raise BER_Decoding_Error("Unknown prefix [%02x] for [%r]" % (p,t), remaining=s)
        codec = context[p].get_codec(ASN1_Codecs.BER)
        return codec.dec(s,context,safe)
    @classmethod
    def dec(cls, s, context=None, safe=False):
        # In safe mode, decoding errors are turned into ASN1 error objects
        # instead of propagating (note: Python 2 'except X, e' syntax).
        if not safe:
            return cls.do_dec(s, context, safe)
        try:
            return cls.do_dec(s, context, safe)
        except BER_BadTag_Decoding_Error,e:
            o,remain = BERcodec_Object.dec(e.remaining, context, safe)
            return ASN1_BADTAG(o),remain
        except BER_Decoding_Error, e:
            return ASN1_DECODING_ERROR(s, exc=e),""
        except ASN1_Error, e:
            return ASN1_DECODING_ERROR(s, exc=e),""
    @classmethod
    def safedec(cls, s, context=None):
        # Convenience wrapper for dec(..., safe=True).
        return cls.dec(s, context, safe=True)
    @classmethod
    def enc(cls, s):
        # Fallback encoder: strings go out as STRING, anything else as INTEGER.
        if type(s) is str:
            return BERcodec_STRING.enc(s)
        else:
            return BERcodec_INTEGER.enc(int(s))
# Make BERcodec_Object the fallback (stem) codec for the BER codec set.
ASN1_Codecs.BER.register_stem(BERcodec_Object)
class BERcodec_INTEGER(BERcodec_Object):
    """Codec for ASN.1 INTEGER: big-endian two's-complement content octets."""
    tag = ASN1_Class_UNIVERSAL.INTEGER
    @classmethod
    def enc(cls, i):
        # Build content octets least-significant first, then reverse.
        s = []
        while 1:
            s.append(i&0xff)
            if -127 <= i < 0:
                break
            if 128 <= i <= 255:
                # Positive value whose top bit is set needs a leading 0x00
                # so it is not read back as negative.
                s.append(0)
            i >>= 8
            if not i:
                break
        s = map(chr, s)
        s.append(BER_len_enc(len(s)))
        s.append(chr(cls.tag))
        s.reverse()
        return "".join(s)
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        l,s,t = cls.check_type_check_len(s)
        x = 0L
        if s:
            if ord(s[0])&0x80: # negative int
                # Seed with -1 so shifted-in bytes sign-extend correctly.
                x = -1L
            for c in s:
                x <<= 8
                x |= ord(c)
        return cls.asn1_object(x),t
class BERcodec_BOOLEAN(BERcodec_INTEGER):
    # BOOLEAN is encoded like INTEGER; only the tag differs.
    tag = ASN1_Class_UNIVERSAL.BOOLEAN
class BERcodec_ENUMERATED(BERcodec_INTEGER):
    # ENUMERATED shares INTEGER's wire format as well.
    tag = ASN1_Class_UNIVERSAL.ENUMERATED
class BERcodec_NULL(BERcodec_INTEGER):
    """Codec for NULL: zero encodes as a tag with zero-length content."""
    tag = ASN1_Class_UNIVERSAL.NULL
    @classmethod
    def enc(cls, i):
        if i == 0:
            # Canonical NULL: tag octet followed by a zero length octet.
            return chr(cls.tag)+"\0"
        else:
            return BERcodec_INTEGER.enc(i)
class BERcodec_SEP(BERcodec_NULL):
    # Separator pseudo-type used internally by scapy's ASN.1 machinery.
    tag = ASN1_Class_UNIVERSAL.SEP
class BERcodec_STRING(BERcodec_Object):
    """Codec for strings: content is the raw bytes after tag and length."""
    tag = ASN1_Class_UNIVERSAL.STRING
    @classmethod
    def enc(cls,s):
        return chr(cls.tag)+BER_len_enc(len(s))+s
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        l,s,t = cls.check_type_check_len(s)
        return cls.tag.asn1_object(s),t
# Tag-only string variants: all reuse BERcodec_STRING's wire format.
class BERcodec_BIT_STRING(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.BIT_STRING
class BERcodec_PRINTABLE_STRING(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.PRINTABLE_STRING
class BERcodec_T61_STRING (BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.T61_STRING
class BERcodec_IA5_STRING(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.IA5_STRING
class BERcodec_NUMERIC_STRING(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.NUMERIC_STRING
class BERcodec_VIDEOTEX_STRING(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.VIDEOTEX_STRING
class BERcodec_IPADDRESS(BERcodec_STRING):
    """Codec for SNMP IpAddress: four raw octets carrying an IPv4 address."""
    tag = ASN1_Class_UNIVERSAL.IPADDRESS
    @classmethod
    def enc(cls, ipaddr_ascii):
        # Convert dotted-quad text to the 4-byte network form.
        try:
            s = inet_aton(ipaddr_ascii)
        except Exception:
            raise BER_Encoding_Error("IPv4 address could not be encoded")
        return chr(cls.tag)+BER_len_enc(len(s))+s
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        l,s,t = cls.check_type_check_len(s)
        try:
            ipaddr_ascii = inet_ntoa(s)
        except Exception:
            # Fix: the original passed 'decoded=obj' where 'obj' was never
            # defined, so decode failures raised NameError instead of the
            # intended BER_Decoding_Error.
            raise BER_Decoding_Error("IP address could not be decoded", remaining=s)
        return cls.asn1_object(ipaddr_ascii), t
# Tag-only aliases: the time types reuse STRING's wire format, and the SNMP
# numeric types reuse INTEGER's.
class BERcodec_UTC_TIME(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.UTC_TIME
class BERcodec_GENERALIZED_TIME(BERcodec_STRING):
    tag = ASN1_Class_UNIVERSAL.GENERALIZED_TIME
class BERcodec_TIME_TICKS(BERcodec_INTEGER):
    tag = ASN1_Class_UNIVERSAL.TIME_TICKS
class BERcodec_GAUGE32(BERcodec_INTEGER):
    tag = ASN1_Class_UNIVERSAL.GAUGE32
class BERcodec_COUNTER32(BERcodec_INTEGER):
    tag = ASN1_Class_UNIVERSAL.COUNTER32
class BERcodec_COUNTER64(BERcodec_INTEGER):
    tag = ASN1_Class_UNIVERSAL.COUNTER64
class BERcodec_SEQUENCE(BERcodec_Object):
    """Codec for SEQUENCE: a concatenation of BER-encoded members."""
    tag = ASN1_Class_UNIVERSAL.SEQUENCE
    @classmethod
    def enc(cls, l):
        # Accept either a pre-encoded byte string or a list of ASN.1 objects.
        if type(l) is not str:
            l = "".join(map(lambda x: x.enc(cls.codec), l))
        return chr(cls.tag)+BER_len_enc(len(l))+l
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        if context is None:
            context = cls.tag.context
        l,st = cls.check_type_get_len(s) # we may have len(s) < l
        s,t = st[:l],st[l:]
        obj = []
        # Decode members one after another until the content is exhausted.
        while s:
            try:
                o,s = BERcodec_Object.dec(s, context, safe)
            except BER_Decoding_Error, err:
                # Attach what was decoded so far so callers can inspect it
                # (Python 2 'except X, e' syntax).
                err.remaining += t
                if err.decoded is not None:
                    obj.append(err.decoded)
                err.decoded = obj
                raise
            obj.append(o)
        if len(st) < l:
            raise BER_Decoding_Error("Not enough bytes to decode sequence", decoded=obj)
        return cls.asn1_object(obj),t
class BERcodec_SET(BERcodec_SEQUENCE):
    # SET shares SEQUENCE's encoding; only the tag differs.
    tag = ASN1_Class_UNIVERSAL.SET
class BERcodec_OID(BERcodec_Object):
    """Codec for OBJECT IDENTIFIER in dotted-decimal text form."""
    tag = ASN1_Class_UNIVERSAL.OID
    @classmethod
    def enc(cls, oid):
        lst = [int(x) for x in oid.strip(".").split(".")]
        if len(lst) >= 2:
            # The first two sub-identifiers share one value: 40*X + Y.
            lst[1] += 40*lst[0]
            del(lst[0])
        s = "".join([BER_num_enc(k) for k in lst])
        return chr(cls.tag)+BER_len_enc(len(s))+s
    @classmethod
    def do_dec(cls, s, context=None, safe=False):
        l,s,t = cls.check_type_check_len(s)
        lst = []
        while s:
            l,s = BER_num_dec(s)
            lst.append(l)
        if (len(lst) > 0):
            # Undo the 40*X + Y packing of the first sub-identifier
            # (Python 2 integer division).
            lst.insert(0,lst[0]/40)
            lst[1] %= 40
        return cls.asn1_object(".".join([str(k) for k in lst])), t
| {
"content_hash": "4d540c26f4895adc0584e4ec5174fb7d",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 159,
"avg_line_length": 30.63711911357341,
"alnum_prop": 0.550994575045208,
"repo_name": "lthurlow/python-tcpsnoop",
"id": "622cb4966c44448929cebed35d88b338b32f95f9",
"size": "11259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scapy/asn1/ber.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2082934"
},
{
"name": "Shell",
"bytes": "971"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
from corehq.sql_db.operations import HqRunPython
def do_not_email_migration(apps, schema_editor):
    """Seed the new reminder flag from the existing invoice flag."""
    Subscription = apps.get_model("accounting", "Subscription")
    flagged = Subscription.objects.filter(do_not_email_invoice=True)
    flagged.update(do_not_email_reminder=True)
class Migration(migrations.Migration):
    """Rename do_not_email to do_not_email_invoice, add do_not_email_reminder,
    and seed the new reminder flag from the invoice flag for existing rows."""
    dependencies = [
        ('accounting', '0033_merge'),
    ]
    operations = [
        migrations.RenameField(
            model_name='subscription',
            old_name='do_not_email',
            new_name='do_not_email_invoice',
        ),
        migrations.AddField(
            model_name='subscription',
            name='do_not_email_reminder',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        # Data migration is one-way; the reverse is a deliberate no-op.
        HqRunPython(do_not_email_migration, reverse_code=lambda app, schema_editor: None)
    ]
| {
"content_hash": "6a03dec22911e7069fe1eb0cc5e12ed4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 93,
"avg_line_length": 31.2,
"alnum_prop": 0.6442307692307693,
"repo_name": "qedsoftware/commcare-hq",
"id": "b6dd5188b4791df4bd1dcade00041b6fba0aa262",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/accounting/migrations/0034_do_not_email_reminders.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
} |
"""pudl setup"""
from setuptools import setup, find_packages
# Load __version__ without importing the package (Python 2 only: execfile
# does not exist on Python 3).
execfile('pudl/version.py')
setup(name='pudl',
      version=__version__,
      description="""pudl is an Active Directory client library and CLI""",
      author='zulily, llc',
      author_email='opensource@zulily.com',
      packages=find_packages(),
      url='https://github.com/zulily/pudl',
      license='Apache License, Version 2.0',
      # Expose the CLI as the 'pudl' console command.
      entry_points={
          'console_scripts': [
              'pudl = pudl.scripts.cli:main'
          ]
      },
      install_requires=[
          'python-ldap',
          'pyyaml'
      ],
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: Developers',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: Python :: 2.7',
          'Topic :: System :: Systems Administration',
          'Topic :: System :: Systems Administration :: Authentication/Directory',
          'Topic :: System :: Systems Administration :: Authentication/Directory :: LDAP',
      ],
      )
| {
"content_hash": "c9714597c4ff3e8293189d71e084028f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 90,
"avg_line_length": 33.114285714285714,
"alnum_prop": 0.5789473684210527,
"repo_name": "zulily/pudl",
"id": "0d823f56127916665227d24c180bfe686d7b5b5a",
"size": "1759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "41932"
}
],
"symlink_target": ""
} |
class Country(object):
    """Simple value object pairing a country name with its code."""

    def __init__(self, name, code):
        self.code = code
        self.name = name
"content_hash": "1ed228465c49f348b8d2780ac6ea2044",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 35,
"avg_line_length": 21.8,
"alnum_prop": 0.5596330275229358,
"repo_name": "M4gn4tor/mastercard-api-python",
"id": "9bbebf006dc1412a78c3b8208e735e5edb76167b",
"size": "109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Services/restaurants/domain/restaurant/country.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "229234"
}
],
"symlink_target": ""
} |
from pycompss.api.task import task
from pycompss.api.constraint import constraint
from storage.storage_object import StorageObject
from pycompss.api.parameter import *
class TNet(StorageObject):
    """Persistent base network exposing a dummy feature-extraction routine."""

    def __init__(self):
        super(TNet, self).__init__()

    def main_extract_features(self, bs, image_paths, pooled, mean):
        # Fixed placeholder features; the arguments are ignored.
        features = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}
        return features
class EA(TNet):
    """TNet variant whose feature extraction runs as a PyCOMPSs task."""
    def __init__(self):
        super(EA, self).__init__()
    @constraint(computing_units="1")
    @task(target_direction=IN, returns=object)
    def extract_features(self, bs, image_paths, pooled, mean):
        # Task wrapper that delegates to the plain implementation in TNet.
        return self.main_extract_features(bs, image_paths, pooled, mean)
| {
"content_hash": "0b9b40278176d6d3578d7c51e162a803",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 72,
"avg_line_length": 30.772727272727273,
"alnum_prop": 0.6617429837518464,
"repo_name": "mF2C/COMPSs",
"id": "dc32c0e702fdd88db664f228dea2b39ae0766353",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/pscos/1_redis_python/src/modules/tnets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "1595"
},
{
"name": "C",
"bytes": "222477"
},
{
"name": "C++",
"bytes": "200186"
},
{
"name": "Dockerfile",
"bytes": "901"
},
{
"name": "Gnuplot",
"bytes": "4195"
},
{
"name": "Java",
"bytes": "4213323"
},
{
"name": "JavaScript",
"bytes": "16906"
},
{
"name": "Jupyter Notebook",
"bytes": "10514"
},
{
"name": "Lex",
"bytes": "1356"
},
{
"name": "M4",
"bytes": "5538"
},
{
"name": "Makefile",
"bytes": "14740"
},
{
"name": "Python",
"bytes": "635267"
},
{
"name": "Shell",
"bytes": "1241476"
},
{
"name": "XSLT",
"bytes": "177323"
},
{
"name": "Yacc",
"bytes": "3655"
}
],
"symlink_target": ""
} |
import torch
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner
@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.
    Each proposals will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.
    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
    """
    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        # Built from config so alternative IoU implementations can be used.
        self.iou_calculator = build_iou_calculator(iou_calculator)
    def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
        """Assign gt to bboxes. The process is very much like the max iou
        assigner, except that positive samples are constrained within the cell
        that the gt boxes fell in.
        This method assign a gt bbox to every bbox (proposal/anchor), each bbox
        will be assigned with -1, 0, or a positive number. -1 means don't care,
        0 means negative sample, positive number is the index (1-based) of
        assigned gt.
        The assignment is done in following steps, the order matters.
        1. assign every bbox to -1
        2. assign proposals whose iou with all gts <= neg_iou_thr to 0
        3. for each bbox within a cell, if the iou with its nearest gt >
            pos_iou_thr and the center of that gt falls inside the cell,
            assign it to that bbox
        4. for each gt bbox, assign its nearest proposals within the cell the
            gt bbox falls in to itself.
        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            box_responsible_flags (Tensor): flag to indicate whether box is
                responsible for prediction, shape(n, )
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
        # compute iou between all gt and bboxes
        # overlaps has shape (num_gts, num_bboxes): rows are gts, columns
        # are candidate boxes (reduced over dim=0/dim=1 below).
        overlaps = self.iou_calculator(gt_bboxes, bboxes)
        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)
        # 2. assign negative: below
        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        # shape of max_overlaps == argmax_overlaps == num_bboxes
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # A scalar threshold marks one negative band; a (lo, hi] pair marks
        # a half-open negative interval.
        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps <= self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, (tuple, list)):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
                             & (max_overlaps <= self.neg_iou_thr[1])] = 0
        # 3. assign positive: falls into responsible cell and above
        # positive IOU threshold, the order matters.
        # the prior condition of comparison is to filter out all
        # unrelated anchors, i.e. not box_responsible_flags
        # Non-responsible anchors get IoU -1 so they can never win a max.
        overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.
        # calculate max_overlaps again, but this time we only consider IOUs
        # for anchors responsible for prediction
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)
        pos_inds = (max_overlaps >
                    self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1
        # 4. assign positive to max overlapped anchors within responsible cell
        # This can rescue gts below pos_iou_thr as long as they clear
        # min_pos_iou (matching the class docstring).
        for i in range(num_gts):
            if gt_max_overlaps[i] > self.min_pos_iou:
                if self.gt_max_assign_all:
                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
                         box_responsible_flags.type(torch.bool)
                    assigned_gt_inds[max_iou_inds] = i + 1
                elif box_responsible_flags[gt_argmax_overlaps[i]]:
                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1
        # assign labels of positive anchors
        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
| {
"content_hash": "b1c2602291ce379daf5d1d8c6323304b",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 79,
"avg_line_length": 43.96774193548387,
"alnum_prop": 0.5801907556859868,
"repo_name": "open-mmlab/mmdetection",
"id": "a0c814e782ebc79600cae4ca4e66b4ebaf47c81e",
"size": "6863",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmdet/core/bbox/assigners/grid_assigner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
} |
"""Bitmap font generator.
This will generate a bitmap font from input font files. This uses the
Python FreeType module (freetype-py). Rather than using config files,
this should be called directly from Python code.
"""
import collections
import freetype
import json
import numpy
import PIL.Image
from . import rectpack
# All printable ASCII characters (space through '~').
ASCII_PRINT = ''.join(chr(x) for x in range(32, 127))
class AlphaStyle(object):
    """Base style type."""
    def _init_face(self, face, size):
        # FreeType takes character sizes in 1/64ths of a point.
        face.set_char_size(round(size * 64))
    def _get_glyph(self, face, c, idx):
        # Render character c and capture its 8-bit alpha bitmap plus metrics.
        gidx = face.get_char_index(c)
        face.load_glyph(gidx)
        bitmap = face.glyph.bitmap
        arr = (numpy.array(bitmap.buffer, dtype=numpy.uint8)
               .reshape(bitmap.rows, bitmap.width))
        return _Glyph(
            c,
            # Advance is in 26.6 fixed point; round to whole pixels.
            int((face.glyph.advance.x + 64 / 2) / 64),
            face.glyph.bitmap_left,
            face.glyph.bitmap_top,
            arr,
            idx,
            gidx)
# One rendered glyph: character, advance width, bearing (bx, by), alpha
# bitmap array, pack-rect index, and FreeType glyph index.
_Glyph = collections.namedtuple('_Glyph', 'chr advance bx by arr idx gidx')
# One source font: its glyph list, per-glyph margin, and JSON metadata dict.
_Font = collections.namedtuple('_Font', 'glyphs margin info')
class FontSet(object):
    """A set of fonts to render to a bitmap."""
    __slots__ = ['_depth', '_fonts', '_rects']
    def __init__(self, *, depth=8):
        # depth: requested bit depth of the atlas (stored but not otherwise
        # used by this class — output is always an 8-bit alpha image).
        self._depth = depth
        self._fonts = []
        self._rects = []
    def add(self, *, name=None, size, path,
            margin=1, charset=ASCII_PRINT, style=AlphaStyle()):
        """Add an alpha mask font to the font set.
        size: Font size, in pixels (floats are okay)
        path: Path to the font file
        margin: Margin on all sides of each glyph
        charset: Set of characters to include
        style: Style for bitmap rendering
        """
        charset = sorted(set(charset))
        if not all(isinstance(c, str) and len(c) == 1 for c in charset):
            raise TypeError('invalid character set')
        face = freetype.Face(path)
        style._init_face(face, size)
        # Collect font-level metadata for the JSON output.
        info = {}
        if name is None:
            info['name'] = face.family_name.decode('ASCII')
        else:
            info['name'] = name
        info['bold'] = bool(face.style_flags & freetype.FT_STYLE_FLAG_BOLD)
        info['italic'] = bool(face.style_flags & freetype.FT_STYLE_FLAG_ITALIC)
        info['size'] = size
        m = face.size
        # Metrics are 26.6 fixed point; +32 >> 6 rounds to whole pixels.
        info['ascender'] = (m.ascender + 32) >> 6
        info['descender'] = (m.descender + 32) >> 6
        info['height'] = (m.height + 32) >> 6
        # Render each glyph and queue its (margin-padded) rect for packing;
        # idx continues across fonts so rects stay globally indexed.
        glyphs = []
        for i, c in enumerate(charset, len(self._rects)):
            g = style._get_glyph(face, c, i)
            glyphs.append(g)
            self._rects.append((
                g.arr.shape[1] + margin * 2,
                g.arr.shape[0] + margin * 2,
            ))
        # Encode pairwise kerning as 'x,y1,k1,y2,k2:...' strings, skipping
        # zero adjustments.
        kern = []
        for nx, gx in enumerate(glyphs):
            gkern = []
            for ny, gy in enumerate(glyphs):
                kx = face.get_kerning(gx.chr, gy.chr,
                                      freetype.FT_KERNING_DEFAULT).x >> 6
                if not kx:
                    continue
                gkern.append('{},{}'.format(ny, kx))
            if gkern:
                kern.append('{},{}'.format(nx, ','.join(gkern)))
        if kern:
            info['kern'] = ':'.join(kern)
        self._fonts.append(_Font(glyphs, margin, info))
    def save(self, *, image_path, json_path):
        """Save the font set to the given image and json files."""
        # Pack all glyph rects into one atlas.
        pack = rectpack.pack(self._rects)
        if not pack:
            raise Exception('font packing failed')
        data = []
        a = numpy.zeros((pack.height, pack.width), dtype=numpy.uint8)
        for font in self._fonts:
            # Blit each glyph into the atlas and record its metrics as a
            # flat integer list (see key below).
            gdata = []
            for g in font.glyphs:
                r = pack.rects[g.idx]
                x = r.x + font.margin
                y = r.y + font.margin
                a[y:y+g.arr.shape[0],x:x+g.arr.shape[1]] = g.arr
                gdata.extend([
                    # Advance
                    g.advance,
                    # Size x, y
                    g.arr.shape[1], g.arr.shape[0],
                    # Pen offset x, y
                    g.bx, g.by,
                    # Texture location x, y
                    x, y,
                ])
            fdata = dict(font.info)
            fdata.update(
                char=''.join(g.chr for g in font.glyphs),
                glyph=','.join(str(n) for n in gdata),
            )
            data.append(fdata)
        print('Fonts: {}'.format(len(self._fonts)))
        print('Glyphs: {}'.format(len(self._rects)))
        print('Writing data to {}'.format(json_path))
        with open(json_path, 'w') as fp:
            json.dump(data, fp, indent=2, sort_keys=True)
        print('Writing image to {}'.format(image_path))
        img = PIL.Image.fromarray(a)
        img.save(image_path)
| {
"content_hash": "23f60ca7143ee460844072d93f71232c",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 36.38059701492537,
"alnum_prop": 0.5142564102564102,
"repo_name": "depp/shifter-children",
"id": "e4c6c47ad19b5415d041dfbd0146ef9b8dc5807d",
"size": "5069",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/font.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "632"
},
{
"name": "GLSL",
"bytes": "2080"
},
{
"name": "JavaScript",
"bytes": "5107"
},
{
"name": "Makefile",
"bytes": "415"
},
{
"name": "TypeScript",
"bytes": "79192"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import random
from django.utils.encoding import python_2_unicode_compatible
from django.utils import six
from django.utils.six.moves import range
from django.db import models
@python_2_unicode_compatible
class Office(models.Model):
    """Office a person can belong to: a name plus a street address."""
    office = models.CharField('office', max_length=255)
    address = models.CharField('address', max_length=255)
    def __str__(self):
        return self.office
    class Meta:
        verbose_name = 'office'
        verbose_name_plural = 'offices'
@python_2_unicode_compatible
class Tag(models.Model):
    """Free-form tag attached to persons via a many-to-many relation."""
    name = models.CharField('tag', max_length=255)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = 'tag'
        verbose_name_plural = 'tags'
@python_2_unicode_compatible
class Person(models.Model):
    """Test model covering plain, choice, FK, M2M and property-backed fields."""
    name = models.CharField('name', max_length=255)
    surname = models.CharField('surname', max_length=255)
    gender = models.CharField('gender', max_length=255, choices=(
        ('M', 'Male'),
        ('F', 'Female'),
    ))
    security_level = models.PositiveIntegerField('security level')
    some_excluded_field = models.DecimalField(
        'some decimal', max_digits=10, decimal_places=3, null=True)
    office = models.ForeignKey(Office, null=True, blank=True)
    tags = models.ManyToManyField(Tag)
    class Settings:
        # django-mtr-sync options: expose the two properties below as
        # syncable fields and skip some_excluded_field.
        sync = {
            'custom_fields': ['custom_method', 'none_param'],
            'exclude': ['some_excluded_field']
        }
    class Meta:
        verbose_name = 'person'
        verbose_name_plural = 'persons'
    def populate(self, num=100):
        """Create *num* sibling records with shuffled name/surname letters,
        random gender and security level, copying this person's office/tags."""
        for i in range(num):
            name = list(self.name)
            random.shuffle(name)
            name = ''.join(name)
            surname = list(self.surname)
            random.shuffle(surname)
            surname = ''.join(surname)
            newobj = self.__class__(
                name=name,
                office=self.office,
                surname=surname,
                gender=random.choice(['M', 'F']),
                security_level=random.choice(range(100))
            )
            newobj.save()
            newobj.tags.add(*self.tags.all())
            newobj.save()
        self.save()
    @classmethod
    def populate_for_test(cls, count):
        """Create one fixed person plus *count* random clones; return the
        person, its office and its tags for use in test assertions."""
        instance = cls.objects.create(
            name=six.text_type('test instance éprouver'),
            surname='test surname',
            gender='M', security_level=10)
        r_instance = Office.objects.create(
            office='test', address='addr')
        tags = [Tag(name='test'), Tag(name='test1')]
        for tag in tags:
            tag.save()
            instance.tags.add(tag)
        instance.office = r_instance
        instance.save()
        instance.populate(count)
        return instance, r_instance, tags
    @property
    def custom_method(self):
        # Composite 'name-surname' value; the setter splits on '-'.
        return six.text_type('{}-{}').format(self.name, self.surname)
    @custom_method.setter
    def custom_method(self, value):
        self.name, self.surname = value.split('-')
    @property
    def none_param(self):
        # Deliberately valueless property used to test null-field syncing.
        return None
    @none_param.setter
    def none_param(self, value):
        pass
    def __str__(self):
        return self.name
| {
"content_hash": "c0693ccc0c10fd5a3ba871347839b0b7",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 69,
"avg_line_length": 27.05,
"alnum_prop": 0.5856438693776956,
"repo_name": "mtrgroup/django-mtr-sync",
"id": "638ea97d1d43199fa134cbef7caf2763a83ab379",
"size": "3264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "192118"
},
{
"name": "Python",
"bytes": "137393"
}
],
"symlink_target": ""
} |
"""
Logistic Regression classifier satisfying differential privacy.
"""
import numbers
import warnings
import numpy as np
from joblib import delayed, Parallel
from scipy import optimize
from sklearn.exceptions import ConvergenceWarning
from sklearn import linear_model
from sklearn.utils import check_array, check_consistent_length
from sklearn.utils.multiclass import check_classification_targets
# todo: Remove when sklearn v1.1.0 is min requirement
try:
from sklearn.linear_model._linear_loss import LinearModelLoss
from sklearn._loss import HalfBinomialLoss
SKL_LOSS_MODULE = True
except (ModuleNotFoundError, ImportError):
from sklearn.linear_model._logistic import _logistic_loss_and_grad
SKL_LOSS_MODULE = False
from diffprivlib.accountant import BudgetAccountant
from diffprivlib.mechanisms import Vector
from diffprivlib.utils import PrivacyLeakWarning, warn_unused_args, check_random_state
from diffprivlib.validation import DiffprivlibMixin
class LogisticRegression(linear_model.LogisticRegression, DiffprivlibMixin):
    r"""Logistic Regression (aka logit, MaxEnt) classifier with differential privacy.
    This class implements regularised logistic regression using :ref:`Scipy's L-BFGS-B algorithm
    <scipy:optimize.minimize-lbfgsb>`. :math:`\epsilon`-Differential privacy is achieved relative to the maximum norm
    of the data, as determined by `data_norm`, by the :class:`.Vector` mechanism, which adds a Laplace-distributed
    random vector to the objective. Adapted from the work presented in [CMS11]_.
    This class is a child of :obj:`sklearn.linear_model.LogisticRegression`, with amendments to allow for the
    implementation of differential privacy. Some parameters of `Scikit Learn`'s model have therefore had to be fixed,
    including:
    - The only permitted solver is 'lbfgs'. Specifying the ``solver`` option will result in a warning.
    - Consequently, the only permitted penalty is 'l2'. Specifying the ``penalty`` option will result in a warning.
    - In the multiclass case, only the one-vs-rest (OvR) scheme is permitted. Specifying the ``multi_class`` option
      will result in a warning.
    Parameters
    ----------
    epsilon : float, default: 1.0
        Privacy parameter :math:`\epsilon`.
    data_norm : float, optional
        The max l2 norm of any row of the data. This defines the spread of data that will be protected by
        differential privacy.
        If not specified, the max norm is taken from the data when ``.fit()`` is first called, but will result in a
        :class:`.PrivacyLeakWarning`, as it reveals information about the data. To preserve differential privacy fully,
        `data_norm` should be selected independently of the data, i.e. with domain knowledge.
    tol : float, default: 1e-4
        Tolerance for stopping criteria.
    C : float, default: 1.0
        Inverse of regularization strength; must be a positive float. Like in support vector machines, smaller values
        specify stronger regularization.
    fit_intercept : bool, default: True
        Specifies if a constant (a.k.a. bias or intercept) should be added to the decision function.
    max_iter : int, default: 100
        Maximum number of iterations taken for the solver to converge. For smaller `epsilon` (more noise), `max_iter`
        may need to be increased.
    verbose : int, default: 0
        Set to any positive number for verbosity.
    warm_start : bool, default: False
        When set to ``True``, reuse the solution of the previous call to fit as initialization, otherwise, just erase
        the previous solution.
    n_jobs : int, optional
        Number of CPU cores used when parallelising over classes. ``None`` means 1 unless in a context. ``-1`` means
        using all processors.
    random_state : int or RandomState, optional
        Controls the randomness of the model. To obtain a deterministic behaviour during randomisation,
        ``random_state`` has to be fixed to an integer.
    accountant : BudgetAccountant, optional
        Accountant to keep track of privacy budget.
    Attributes
    ----------
    classes_ : array, shape (n_classes, )
        A list of class labels known to the classifier.
    coef_ : array, shape (1, n_features) or (n_classes, n_features)
        Coefficient of the features in the decision function.
        `coef_` is of shape (1, n_features) when the given problem is binary.
    intercept_ : array, shape (1,) or (n_classes,)
        Intercept (a.k.a. bias) added to the decision function.
        If `fit_intercept` is set to False, the intercept is set to zero. `intercept_` is of shape (1,) when the
        given problem is binary.
    n_iter_ : array, shape (n_classes,) or (1, )
        Actual number of iterations for all classes. If binary, it returns only 1 element.
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from diffprivlib.models import LogisticRegression
    >>> X, y = load_iris(return_X_y=True)
    >>> clf = LogisticRegression(data_norm=12, epsilon=2).fit(X, y)
    >>> clf.predict(X[:2, :])
    array([0, 0])
    >>> clf.predict_proba(X[:2, :])
    array([[7.35362932e-01, 2.16667422e-14, 2.64637068e-01],
           [9.08384378e-01, 3.47767052e-13, 9.16156215e-02]])
    >>> clf.score(X, y)
    0.5266666666666666
    See also
    --------
    sklearn.linear_model.LogisticRegression : The implementation of logistic regression in scikit-learn, upon which this
        implementation is built.
    .Vector : The mechanism used by the model to achieve differential privacy.
    References
    ----------
    .. [CMS11] Chaudhuri, Kamalika, Claire Monteleoni, and Anand D. Sarwate. "Differentially private empirical risk
        minimization." Journal of Machine Learning Research 12, no. Mar (2011): 1069-1109.
    """
    def __init__(self, *, epsilon=1.0, data_norm=None, tol=1e-4, C=1.0, fit_intercept=True, max_iter=100, verbose=0,
                 warm_start=False, n_jobs=None, random_state=None, accountant=None, **unused_args):
        # Solver, penalty and multi_class are fixed by the DP construction
        # (see class docstring); only the remaining knobs are exposed.
        super().__init__(penalty='l2', dual=False, tol=tol, C=C, fit_intercept=fit_intercept, intercept_scaling=1.0,
                         class_weight=None, random_state=random_state, solver='lbfgs', max_iter=max_iter,
                         multi_class='ovr', verbose=verbose, warm_start=warm_start, n_jobs=n_jobs)
        self.epsilon = epsilon
        self.data_norm = data_norm
        self.classes_ = None
        self.accountant = BudgetAccountant.load_default(accountant)
        self._warn_unused_args(unused_args)
    # noinspection PyAttributeOutsideInit
    def fit(self, X, y, sample_weight=None):
        """Fit the model according to the given training data.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and n_features is the number of features.
        y : array-like, shape (n_samples,)
            Target vector relative to X.
        sample_weight : ignored
            Ignored by diffprivlib. Present for consistency with sklearn API.
        Returns
        -------
        self : class
        """
        # Verify the accountant can afford this epsilon before any work;
        # the budget is only spent on successful completion (see below).
        self.accountant.check(self.epsilon, 0)
        if sample_weight is not None:
            self._warn_unused_args("sample_weight")
        random_state = check_random_state(self.random_state)
        if not isinstance(self.C, numbers.Real) or self.C < 0:
            raise ValueError(f"Penalty term must be positive; got (C={self.C})")
        if not isinstance(self.max_iter, numbers.Integral) or self.max_iter < 0:
            raise ValueError(f"Maximum number of iteration must be positive; got (max_iter={self.max_iter})")
        if not isinstance(self.tol, numbers.Real) or self.tol < 0:
            raise ValueError(f"Tolerance for stopping criteria must be positive; got (tol={self.tol})")
        X, y = self._validate_data(X, y, accept_sparse='csr', dtype=float, order="C",
                                   accept_large_sparse=True)
        check_classification_targets(y)
        self.classes_ = np.unique(y)
        _, n_features = X.shape
        if self.data_norm is None:
            # Deriving the norm from the data itself leaks information
            # about the data, hence the PrivacyLeakWarning.
            warnings.warn("Data norm has not been specified and will be calculated on the data provided. This will "
                          "result in additional privacy leakage. To ensure differential privacy and no additional "
                          "privacy leakage, specify `data_norm` at initialisation.", PrivacyLeakWarning)
            self.data_norm = np.linalg.norm(X, axis=1).max()
        # Clip each row to the declared norm bound.
        X = self._clip_to_norm(X, self.data_norm)
        n_classes = len(self.classes_)
        classes_ = self.classes_
        if n_classes < 2:
            raise ValueError("This solver needs samples of at least 2 classes in the data, but the data contains only "
                             f"one class: {classes_[0]}")
        # Binary problems need only a single OvR fit (against classes_[1]).
        if len(self.classes_) == 2:
            n_classes = 1
            classes_ = classes_[1:]
        if self.warm_start:
            warm_start_coef = getattr(self, 'coef_', None)
        else:
            warm_start_coef = None
        if warm_start_coef is not None and self.fit_intercept:
            warm_start_coef = np.append(warm_start_coef, self.intercept_[:, np.newaxis], axis=1)
        self.coef_ = []
        self.intercept_ = np.zeros(n_classes)
        if warm_start_coef is None:
            warm_start_coef = [None] * n_classes
        path_func = delayed(_logistic_regression_path)
        # One OvR fit per class; the privacy budget is split evenly across
        # the classes (epsilon / n_classes each).
        fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer='processes')(
            path_func(X, y, epsilon=self.epsilon / n_classes, data_norm=self.data_norm, pos_class=class_, Cs=[self.C],
                      fit_intercept=self.fit_intercept, max_iter=self.max_iter, tol=self.tol, verbose=self.verbose,
                      coef=warm_start_coef_, random_state=random_state, check_input=False)
            for class_, warm_start_coef_ in zip(classes_, warm_start_coef))
        fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
        self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
        self.coef_ = np.asarray(fold_coefs_)
        self.coef_ = self.coef_.reshape(n_classes, n_features + int(self.fit_intercept))
        if self.fit_intercept:
            # The last column of each coefficient vector is the intercept.
            self.intercept_ = self.coef_[:, -1]
            self.coef_ = self.coef_[:, :-1]
        self.accountant.spend(self.epsilon, 0)
        return self
def _logistic_regression_path(X, y, epsilon, data_norm, pos_class=None, Cs=10, fit_intercept=True, max_iter=100,
                              tol=1e-4, verbose=0, coef=None, random_state=None, check_input=True, **unused_args):
    """Compute a Logistic Regression model with differential privacy for a list of regularization parameters. Takes
    inspiration from ``_logistic_regression_path`` in scikit-learn, specified to the LBFGS solver and one-vs-rest
    multi class fitting.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        Input data.
    y : array-like, shape (n_samples,) or (n_samples, n_targets)
        Input data, target values.
    epsilon : float
        Privacy parameter for differential privacy.
    data_norm : float
        Max norm of the data for which differential privacy is satisfied.
    pos_class : int, optional
        The class with respect to which we perform a one-vs-all fit. If None, then it is assumed that the given problem
        is binary.
    Cs : int | array-like, shape (n_cs,), default: 10
        List of values for the regularization parameter or integer specifying the number of regularization parameters
        that should be used. In this case, the parameters will be chosen in a logarithmic scale between 1e-4 and 1e4.
    fit_intercept : bool, default: True
        Whether to fit an intercept for the model. In this case the shape of the returned array is
        (n_cs, n_features + 1).
    max_iter : int, default: 100
        Maximum number of iterations for the solver.
    tol : float, default: 1e-4
        Stopping criterion. For the newton-cg and lbfgs solvers, the iteration will stop when ``max{|g_i | i = 1,
        ..., n} <= tol`` where ``g_i`` is the i-th component of the gradient.
    verbose : int, default: 0
        For the liblinear and lbfgs solvers set verbose to any positive number for verbosity.
    coef : array-like, shape (n_features,), optional
        Initialization value for coefficients of logistic regression. Useless for liblinear solver.
    random_state : int or RandomState, optional
        Controls the randomness of the model. To obtain a deterministic behaviour during randomisation,
        ``random_state`` has to be fixed to an integer.
    check_input : bool, default: True
        If False, the input arrays X and y will not be checked.
    Returns
    -------
    coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
        List of coefficients for the Logistic Regression model. If fit_intercept is set to True then the second
        dimension will be n_features + 1, where the last item represents the intercept. For
        ``multiclass='multinomial'``, the shape is (n_classes, n_cs, n_features) or (n_classes, n_cs, n_features + 1).
    Cs : ndarray
        Grid of Cs used for cross-validation.
    n_iter : array, shape (n_cs,)
        Actual number of iteration for each Cs.
    """
    warn_unused_args(unused_args)
    random_state = check_random_state(random_state)
    if isinstance(Cs, numbers.Integral):
        Cs = np.logspace(-4, 4, int(Cs))
    # Data norm increases if intercept is included
    if fit_intercept:
        data_norm = np.sqrt(data_norm ** 2 + 1)
    # Pre-processing.
    if check_input:
        X = check_array(X, accept_sparse='csr', dtype=np.float64, accept_large_sparse=True)
        y = check_array(y, ensure_2d=False, dtype=None)
        check_consistent_length(X, y)
    _, n_features = X.shape
    classes = np.unique(y)
    if pos_class is None:
        if classes.size > 2:
            raise ValueError('To fit OvR, use the pos_class argument')
        # np.unique(y) gives labels in sorted order.
        pos_class = classes[1]
    sample_weight = np.ones(X.shape[0], dtype=X.dtype)
    # For doing a ovr, we need to mask the labels first.
    output_vec = np.zeros(n_features + int(fit_intercept), dtype=X.dtype)
    mask = (y == pos_class)
    y_bin = np.ones(y.shape, dtype=X.dtype)
    # The new sklearn loss module expects {0, 1} labels; the legacy
    # _logistic_loss_and_grad expects {-1, +1}.
    y_bin[~mask] = 0.0 if SKL_LOSS_MODULE else -1.0
    if coef is not None:
        # it must work both giving the bias term and not
        if coef.size not in (n_features, output_vec.size):
            raise ValueError(f"Initialization coef is of shape {coef.size}, expected shape {n_features} or "
                             f"{output_vec.size}")
        output_vec[:coef.size] = coef
    target = y_bin
    if SKL_LOSS_MODULE:
        func = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept).loss_gradient
    else:
        func = _logistic_loss_and_grad
    coefs = []
    n_iter = np.zeros(len(Cs), dtype=np.int32)
    for i, C in enumerate(Cs):
        # Objective perturbation: the Vector mechanism randomises the loss
        # function itself before it is handed to the optimiser.
        vector_mech = Vector(epsilon=epsilon, dimension=n_features + int(fit_intercept), alpha=1. / C,
                             function_sensitivity=0.25, data_sensitivity=data_norm, random_state=random_state)
        noisy_logistic_loss = vector_mech.randomise(func)
        # The two loss implementations take their arguments in different orders.
        args = (X, target, sample_weight, 1. / C) if SKL_LOSS_MODULE else (X, target, 1. / C, sample_weight)
        # Map the verbose level onto l-bfgs-b's iprint codes.
        iprint = [-1, 50, 1, 100, 101][np.searchsorted(np.array([0, 1, 2, 3]), verbose)]
        output_vec, _, info = optimize.fmin_l_bfgs_b(noisy_logistic_loss, output_vec, fprime=None,
                                                     args=args, iprint=iprint, pgtol=tol, maxiter=max_iter)
        if info["warnflag"] == 1:
            warnings.warn("lbfgs failed to converge. Increase the number of iterations.", ConvergenceWarning)
        coefs.append(output_vec.copy())
        n_iter[i] = info['nit']
    return np.array(coefs), np.array(Cs), n_iter
| {
"content_hash": "8d21029845be1ca266f9883603f4735d",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 120,
"avg_line_length": 43.03713527851459,
"alnum_prop": 0.6496764252696456,
"repo_name": "IBM/differential-privacy-library",
"id": "a775e50d9e20b0d076f9633006126ff7bacca159",
"size": "18913",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "diffprivlib/models/logistic_regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "580374"
}
],
"symlink_target": ""
} |
import tensortools as tt
import numpy as np

# Make synthetic dataset.
I, J, K, R = 25, 25, 25, 4  # dimensions and rank
X = tt.randn_ktensor((I, J, K), rank=R).full()
X += np.random.randn(I, J, K)  # add elementwise Gaussian noise to the low-rank tensor
# Fit CP tensor decomposition to first 20 trials.
U = tt.cp_als(X[:, :, :20], rank=R, verbose=True)
# Extend and re-initialize the factors along the final mode.
# NOTE(review): assumes U.factors.copy() yields an object whose .factors
# list and .shape attribute may be mutated in place -- confirm against the
# tensortools KTensor API.
Uext = U.factors.copy()
Uext.factors[-1] = np.random.randn(K, R)
Uext.shape = (I, J, K)
# Fit model to the full dataset, only fitting the final set of factors.
V = tt.cp_als(X, rank=R, init=Uext, skip_modes=[0, 1], verbose=True)
| {
"content_hash": "59b0edb6cd3976310a4350a8faebefe4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 71,
"avg_line_length": 32.833333333333336,
"alnum_prop": 0.6700507614213198,
"repo_name": "ahwillia/tensortools",
"id": "289239bd226828b0c637821dbdb94680452bd4be",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/cpd_skip_modes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71935"
},
{
"name": "TeX",
"bytes": "4358"
}
],
"symlink_target": ""
} |
from lib.PerfKit import PerfKit
from lib.Rally import Rally
from lib.Shaker import Shaker
from lib.WorkloadBase import WorkloadBase
import argparse
import logging
import sys
import yaml
import datetime
import os
from pykwalify import core as pykwalify_core
from pykwalify import errors as pykwalify_errors
# Workload tools Browbeat can drive; also the set of valid CLI workload names.
_workload_opts = ['perfkit', 'rally', 'shaker']
# Default YAML configuration file, resolved relative to the working directory.
_config_file = 'browbeat-config.yaml'
# Debug log destination; logging.FileHandler requires the directory to exist.
debug_log_file = 'log/debug.log'
def _load_config(path, _logger):
    """Load and validate the Browbeat YAML configuration at *path*.

    Exits the process with status 1 if the file cannot be opened;
    propagates any exception raised by schema validation.
    """
    try:
        # `with` guarantees the handle is closed even when yaml.load
        # raises (the original leaked the handle on a parse error).
        with open(path, 'r') as stream:
            # SECURITY NOTE: yaml.load can construct arbitrary Python
            # objects. The file is operator-supplied here, but
            # yaml.safe_load would be the safer default.
            config = yaml.load(stream)
    except IOError:
        _logger.error("Configuration file {} passed is missing".format(path))
        sys.exit(1)  # sys.exit instead of the site-provided exit()
    validate_yaml(config, _logger)
    return config
def validate_yaml(config, _logger):
    """Check *config* against the bundled pykwalify schema.

    Raises Exception when the configuration does not conform.
    """
    _logger.info("Validating the configuration file passed by the user")
    # Close the schema file promptly; the original never closed it.
    with open("lib/validate.yaml", 'r') as stream:
        schema = yaml.load(stream)
    check = pykwalify_core.Core(source_data=config, schema_data=schema)
    try:
        check.validate(raise_exception=True)
        _logger.info("Validation successful")
    except pykwalify_errors.SchemaError as e:
        _logger.error("Schema Validation failed")
        raise Exception('File does not conform to schema: {}'.format(e))
def _run_workload_provider(provider, config):
_logger = logging.getLogger('browbeat')
if provider == "perfkit":
perfkit = PerfKit(config)
perfkit.start_workloads()
elif provider == "rally":
rally = Rally(config)
rally.start_workloads()
elif provider == "shaker":
shaker = Shaker(config)
shaker.run_shaker()
else:
_logger.error("Unknown workload provider: {}".format(provider))
def main():
    """CLI entry point: parse arguments, configure logging, then run the
    selected (or all) enabled workload providers and print a report."""
    parser = argparse.ArgumentParser(
        description="Browbeat Performance and Scale testing for Openstack")
    parser.add_argument(
        '-s',
        '--setup',
        nargs='?',
        default=_config_file,
        help='Provide Browbeat YAML configuration file. Default is ./{}'.format(_config_file))
    parser.add_argument('workloads', nargs='*', help='Browbeat workload(s). Takes a space separated'
                        ' list of workloads ({}) or \"all\"'.format(', '.join(_workload_opts)))
    parser.add_argument('--debug', action='store_true', help='Enable Debug messages')
    _cli_args = parser.parse_args()
    # Logging: everything goes to the debug file; the console handler is
    # INFO by default, DEBUG with --debug. Handler setup order matters.
    _logger = logging.getLogger('browbeat')
    _logger.setLevel(logging.DEBUG)
    _formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)7s - %(message)s')
    _dbg_file = logging.FileHandler(debug_log_file)
    _dbg_file.setLevel(logging.DEBUG)
    _dbg_file.setFormatter(_formatter)
    _ch = logging.StreamHandler()
    if _cli_args.debug:
        _ch.setLevel(logging.DEBUG)
    else:
        _ch.setLevel(logging.INFO)
    _ch.setFormatter(_formatter)
    _logger.addHandler(_dbg_file)
    _logger.addHandler(_ch)
    _logger.debug("CLI Args: {}".format(_cli_args))
    # Load Browbeat yaml config file:
    _config = _load_config(_cli_args.setup, _logger)
    # Default to all workloads
    if _cli_args.workloads == []:
        _cli_args.workloads.append('all')
    if len(_cli_args.workloads) == 1 and 'all' in _cli_args.workloads:
        _cli_args.workloads = _workload_opts
    # Reject anything that is not a known provider name.
    invalid_wkld = [wkld for wkld in _cli_args.workloads if wkld not in _workload_opts]
    if invalid_wkld:
        _logger.error("Invalid workload(s) specified: {}".format(invalid_wkld))
        if 'all' in _cli_args.workloads:
            _logger.error("If you meant 'all' use: './browbeat.py all' or './browbeat.py'")
        # NOTE(review): bare exit() relies on the site module; sys.exit()
        # would be the safer choice here.
        exit(1)
    else:
        time_stamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
        _logger.info("Browbeat test suite kicked off")
        _logger.info("Running workload(s): {}".format(','.join(_cli_args.workloads)))
        # Run each requested provider that is present and enabled in the config.
        for wkld_provider in _cli_args.workloads:
            if wkld_provider in _config:
                if _config[wkld_provider]['enabled']:
                    _run_workload_provider(wkld_provider, _config)
                else:
                    _logger.warning("{} is not enabled in {}".format(wkld_provider,
                                                                     _cli_args.setup))
            else:
                _logger.error("{} is missing in {}".format(wkld_provider, _cli_args.setup))
        result_dir = _config['browbeat']['results']
        WorkloadBase.print_report(result_dir, time_stamp)
        _logger.info("Saved browbeat result summary to {}".format(
            os.path.join(result_dir,time_stamp + '.' + 'report')))
        WorkloadBase.print_summary()
# Script entry point; main()'s return value becomes the exit status.
if __name__ == '__main__':
    sys.exit(main())
| {
"content_hash": "58ae92dea593796090b6c48e72818c62",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 100,
"avg_line_length": 37.886178861788615,
"alnum_prop": 0.6248927038626609,
"repo_name": "jtaleric/browbeat",
"id": "4edf0edc4318bd833187bb6c979347b48ae2b476",
"size": "4682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "browbeat.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "87444"
},
{
"name": "Shell",
"bytes": "12409"
}
],
"symlink_target": ""
} |
import datetime
import json
import random
import string
import webob
import webob.dec
from paste import urlmap
from glance import client as glance_client
from nova import auth
from nova import context
from nova import exception as exc
from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import ratelimiting
from nova.image import glance
from nova.image import local
from nova.image import service
from nova.tests import fake_flags
from nova.wsgi import Router
class Context(object):
    # Empty attribute container standing in for a request context.
    pass
class FakeRouter(Router):
    """WSGI router stand-in that answers every request with 200 and an
    X-Test-Success marker header."""
    def __init__(self):
        # Skip Router's normal mapper setup.
        pass
    @webob.dec.wsgify
    def __call__(self, req):
        res = webob.Response()
        res.status = '200'
        res.headers['X-Test-Success'] = 'True'
        return res
def fake_auth_init(self, application):
    """Replacement __init__ for the auth middleware: wires in the fake
    database, context and auth manager instead of the real services."""
    self.db = FakeAuthDatabase()
    self.context = Context()
    self.auth = FakeAuthManager()
    self.application = application
@webob.dec.wsgify
def fake_wsgi(self, req):
    """Replacement __call__ that injects a fixed (1, 1) request context,
    parses any JSON body into the environ, and defers to the wrapped app."""
    req.environ['nova.context'] = context.RequestContext(1, 1)
    if req.body:
        req.environ['inst_dict'] = json.loads(req.body)
    return self.application
def wsgi_app(inner_application=None):
    """Build the OpenStack API WSGI stack used by the tests.

    Wraps *inner_application* (default: a fresh APIRouter) in auth and
    rate-limiting middleware under /v1.0, with version info at /.
    """
    if not inner_application:
        inner_application = openstack.APIRouter()
    mapper = urlmap.URLMap()
    api = openstack.FaultWrapper(auth.AuthMiddleware(
        ratelimiting.RateLimitingMiddleware(inner_application)))
    mapper['/v1.0'] = api
    mapper['/'] = openstack.FaultWrapper(openstack.Versions())
    return mapper
def stub_out_key_pair_funcs(stubs):
    """Patch the DB key-pair lookup to return one canned key pair."""
    def key_pair(context, user_id):
        return [{'name': 'key', 'public_key': 'public_key'}]
    stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
def stub_out_image_service(stubs):
    """Patch the local image service so show() yields fixed kernel/ramdisk ids."""
    def fake_image_show(meh, context, id):
        return {'kernelId': 1, 'ramdiskId': 1}
    stubs.Set(local.LocalImageService, 'show', fake_image_show)
def stub_out_auth(stubs):
    """Neutralise the auth middleware: __init__ only stores the wrapped
    app and __call__ is replaced by the pass-through fake_wsgi."""
    def fake_auth_init(self, app):
        self.application = app
    stubs.Set(nova.api.openstack.auth.AuthMiddleware,
              '__init__', fake_auth_init)
    stubs.Set(nova.api.openstack.auth.AuthMiddleware,
              '__call__', fake_wsgi)
def stub_out_rate_limiting(stubs):
    """Neutralise the rate-limiting middleware so requests pass straight
    through to the wrapped application."""
    def fake_rate_init(self, app):
        # Keep the superclass initialisation but skip limiter setup.
        super(ratelimiting.RateLimitingMiddleware, self).__init__(app)
        self.application = app
    stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
              '__init__', fake_rate_init)
    stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
              '__call__', fake_wsgi)
def stub_out_networking(stubs):
    """Patch the flags helper so the detected local IP is always loopback."""
    stubs.Set(nova.flags, '_get_my_ip', lambda: '127.0.0.1')
def stub_out_compute_api_snapshot(stubs):
    """Patch compute API snapshot() to return a fixed image id (123)."""
    stubs.Set(nova.compute.API, 'snapshot',
              lambda self, context, instance_id, name: 123)
def stub_out_glance(stubs, initial_fixtures=None):
    """Monkey-patch the glance client with an in-memory fake backed by the
    *initial_fixtures* list of image-metadata dicts."""
    class FakeGlanceClient:
        def __init__(self, initial_fixtures):
            self.fixtures = initial_fixtures or []
        def fake_get_images(self):
            # Index view: id and name only.
            return [dict(id=f['id'], name=f['name'])
                    for f in self.fixtures]
        def fake_get_images_detailed(self):
            return self.fixtures
        def fake_get_image_meta(self, image_id):
            # Linear scan; returns None when the id is unknown.
            for f in self.fixtures:
                if f['id'] == image_id:
                    return f
            return None
        def fake_add_image(self, image_meta):
            # Random 20-letter id. NOTE: string.letters is Python 2-only
            # (string.ascii_letters in Python 3).
            id = ''.join(random.choice(string.letters) for _ in range(20))
            image_meta['id'] = id
            self.fixtures.append(image_meta)
            return id
        def fake_update_image(self, image_id, image_meta):
            f = self.fake_get_image_meta(image_id)
            if not f:
                raise exc.NotFound
            f.update(image_meta)
        def fake_delete_image(self, image_id):
            f = self.fake_get_image_meta(image_id)
            if not f:
                raise exc.NotFound
            self.fixtures.remove(f)
        ##def fake_delete_all(self):
        ##    self.fixtures = []
    GlanceClient = glance_client.Client
    fake = FakeGlanceClient(initial_fixtures)
    # Replace each real client method with the corresponding fake.
    stubs.Set(GlanceClient, 'get_images', fake.fake_get_images)
    stubs.Set(GlanceClient, 'get_images_detailed',
              fake.fake_get_images_detailed)
    stubs.Set(GlanceClient, 'get_image_meta', fake.fake_get_image_meta)
    stubs.Set(GlanceClient, 'add_image', fake.fake_add_image)
    stubs.Set(GlanceClient, 'update_image', fake.fake_update_image)
    stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image)
    #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all)
class FakeToken(object):
    """Minimal auth-token stand-in: mirrors arbitrary keyword arguments
    as instance attributes (e.g. token_hash, created_at)."""
    def __init__(self, **kwargs):
        # items() rather than the Python 2-only iteritems(): identical
        # iteration semantics, but also valid on Python 3.
        for k, v in kwargs.items():
            setattr(self, k, v)
class FakeRequestContext(object):
    """Request-context stand-in: every caller is reported as user 1 on
    project 1, whatever arguments are supplied."""
    def __init__(self, user, project, *args, **kwargs):
        self.user_id = self.project_id = 1
class FakeAuthDatabase(object):
    """Dict-backed stand-in for the token table of nova's auth database.

    Tokens live in the class-level ``data`` dict, keyed by token_hash,
    so they are shared by every user of the fake.
    """
    data = {}
    @staticmethod
    def auth_get_token(context, token_hash):
        """Return the stored token for *token_hash*, or None."""
        return FakeAuthDatabase.data.get(token_hash, None)
    @staticmethod
    def auth_create_token(context, token):
        """Build a FakeToken from the *token* dict (adding a creation
        timestamp), store it under its token_hash, and return it."""
        fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
        FakeAuthDatabase.data[fake_token.token_hash] = fake_token
        return fake_token
    @staticmethod
    def auth_destroy_token(context, token):
        """Remove *token* from the store if it is present."""
        if token.token_hash in FakeAuthDatabase.data:
            # Bug fix: the original deleted the literal key 'token_hash',
            # which raised KeyError and never removed the entry.
            del FakeAuthDatabase.data[token.token_hash]
class FakeAuthManager(object):
    """In-memory replacement for nova's auth manager.

    Users are registered against an access key via ``add_user`` and kept
    in the class-level ``auth_data`` dict (shared across instances).
    """
    auth_data = {}
    def add_user(self, key, user):
        """Register *user* under access key *key*."""
        FakeAuthManager.auth_data[key] = user
    def get_user(self, uid):
        """Return the registered user whose ``id`` equals *uid*, else None."""
        # items() rather than the Python 2-only iteritems(): identical
        # iteration semantics, but also valid on Python 3.
        for k, v in FakeAuthManager.auth_data.items():
            if v.id == uid:
                return v
        return None
    def get_project(self, pid):
        """Projects are not modelled by this fake."""
        return None
    def get_user_from_access_key(self, key):
        """Return the user registered under *key*, or None."""
        return FakeAuthManager.auth_data.get(key, None)
class FakeRateLimiter(object):
    """Rate-limiter middleware stand-in that never limits: every request
    is forwarded straight to the wrapped application."""
    def __init__(self, application):
        self.application = application
    @webob.dec.wsgify
    def __call__(self, req):
        return self.application
| {
"content_hash": "81d9472015f32020219babca3c279d31",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 75,
"avg_line_length": 27.467248908296945,
"alnum_prop": 0.6413354531001589,
"repo_name": "anotherjesse/nova",
"id": "fb282f1c97340c8e79b053216002ad637213d424",
"size": "6965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/fakes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "47238"
},
{
"name": "Python",
"bytes": "1445578"
},
{
"name": "Shell",
"bytes": "37610"
}
],
"symlink_target": ""
} |
from pbcore.io.FastaIO import FastaReader, FastaWriter
def fasta_count(fasta_file):
    """Return the number of non-empty records in *fasta_file*.

    Returns 0 when the file cannot be read or parsed.
    """
    count = 0
    try:
        for record in FastaReader(fasta_file):
            if len(record.sequence) > 0:
                count += 1
    except Exception:
        # Narrowed from a bare except: no longer swallows SystemExit or
        # KeyboardInterrupt while keeping the 0-on-failure contract.
        return 0
    return count
def fasta_names(fasta_file):
    """Return the set of whitespace-stripped record names in *fasta_file*."""
    return {record.name.strip() for record in FastaReader(fasta_file)}
def copy_fasta_sequences(fasta_file, fasta_writer):
    """Stream every record from *fasta_file* into *fasta_writer*."""
    for record in FastaReader(fasta_file):
        fasta_writer.writeRecord(record)
def copy_fasta_list(sequence_list, output_file):
    """Concatenate the FASTA files named (one path per line) in
    *sequence_list* into the single FASTA file *output_file*."""
    with FastaWriter(output_file) as writer, open(sequence_list) as handle:
        for line in handle:
            copy_fasta_sequences(line.strip(), writer)
# CLI usage: <script> <list-of-fasta-paths-file> <output-fasta>
if __name__ == '__main__':
    import sys
    sequence_list = sys.argv[1]
    output_file = sys.argv[2]
copy_fasta_list( sequence_list, output_file ) | {
"content_hash": "bcacdd7086b951d2a9c992db7412767d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 67,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.618801652892562,
"repo_name": "PacificBiosciences/rDnaTools",
"id": "0359652c5f8d9833b6848dc666dddc3e2b711580",
"size": "968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pbrdna/fasta/utils.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "248288"
}
],
"symlink_target": ""
} |
from pyglottolog.glottodata import GlottoData
# Quick sanity check of the Glottolog data wrapper (Python 2 script).
db = GlottoData()
# NOTE(review): GetLanguages() is called twice; if it queries the backing
# store each time, caching the result once would avoid the duplicate call.
print len(db.GetLanguages())
print db.GetLanguages()[:10]
| {
"content_hash": "98a5312378f5a0a4d3704898e42d2014",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 24.6,
"alnum_prop": 0.7804878048780488,
"repo_name": "sarum90/glottodatagrab",
"id": "a7406d4cadb89529e297da532413d505ee8fb5f3",
"size": "124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sample.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6512"
}
],
"symlink_target": ""
} |
import FBConnect
import sys, traceback
class FBConnectIface:
    """Facade over FBConnect (Python 2 code): resolves a random session
    token to a Facebook user and their friends list."""
    # Class-level defaults; populated per-instance by the methods below.
    friendsList = None
    uid = None
    fbconn = None
    def __init__(self, randomToken):
        try:
            # Hard-coded Windows path to the SQLite token store.
            self.db_filename = 'C:/Stuffs/www/db.sqlite'
            self.fbconn = FBConnect.FBConnect(self.db_filename)
            self.randomToken = randomToken
        except:
            # Any failure is printed with a short traceback; the instance
            # is left partially initialised.
            print 'Exception in function: '
            traceback.print_exception(sys.exc_info()[ 0 ], sys.exc_info()[ 1 ], sys.exc_info()[ 2 ], limit=4)
    def fetchFriends(self):
        """Fetch and cache the friends list for the stored token."""
        self.friendsList = self.fbconn.fetchFriends(self.randomToken)['data']
        return self.friendsList
    def isFriend(self, uid):
        """Return True when *uid* appears in the current friends list;
        any lookup failure is treated as 'not a friend'."""
        friendRet = False
        try:
            for friend in self.fetchFriends():
                if friend[ 'id' ] == uid:
                    friendRet = True
        except ValueError:
            friendRet = False
        except:
            friendRet = False
        print friendRet
        return friendRet
    def isValidToken(self):
        """Return True when the stored token maps to an access token."""
        if self.fbconn.getAccessToken(self.randomToken) is not None:
            return True
        return False
    def fetchUid(self):
        """Resolve and cache the Facebook uid for the stored token."""
        self.uid = self.fbconn.getFBUser(self.randomToken).fetchUid()
return self.uid | {
"content_hash": "723d13ccc66c6c90f2b82b48ab97e468",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 109,
"avg_line_length": 30.466666666666665,
"alnum_prop": 0.5331874544128373,
"repo_name": "NareshPS/pyDCHub",
"id": "40b515b0dbe5b4ee76c41a207b39988c57b94f25",
"size": "1371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/FBConnectIface/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "196062"
}
],
"symlink_target": ""
} |
"""
This module defines an :py:class:`aiohttp.ClientSession` adapter
that returns awaitable responses.
"""
# Standard library imports
import asyncio
import collections
import inspect
import threading
from concurrent import futures
# Third-party imports
try:
import aiohttp
except ImportError: # pragma: no cover
aiohttp = None
# Local imports
from uplink.clients import exceptions, io, interfaces, register
def threaded_callback(callback):
    """Adapt a synchronous *callback* for use from coroutine context by
    pre-reading the response body and proxying it via ThreadedResponse."""
    async def new_callback(response):
        if isinstance(response, aiohttp.ClientResponse):
            # Read the body up front so the synchronous callback never
            # has to await anything on the response.
            await response.text()
            response = ThreadedResponse(response)
        response = callback(response)
        if isinstance(response, ThreadedResponse):
            # Unwrap so downstream consumers get the raw response back.
            return response.unwrap()
        else:
            return response
    return new_callback
class AiohttpClient(interfaces.HttpClientAdapter):
"""
An :py:mod:`aiohttp` client that creates awaitable responses.
Note:
This client is an optional feature and requires the :py:mod:`aiohttp`
package. For example, here's how to install this extra using pip::
$ pip install uplink[aiohttp]
Args:
session (:py:class:`aiohttp.ClientSession`, optional):
The session that should handle sending requests. If this
argument is omitted or set to :py:obj:`None`, a new session
will be created.
"""
exceptions = exceptions.Exceptions()
# TODO: Update docstrings to include aiohttp constructor parameters.
__ARG_SPEC = collections.namedtuple("__ARG_SPEC", "args kwargs")
    def __init__(self, session=None, **kwargs):
        # Fail fast when the optional aiohttp extra is not installed.
        if aiohttp is None:
            raise NotImplementedError("aiohttp is not installed.")
        self._auto_created_session = False
        if session is None:
            # Store the constructor arguments instead of building the
            # session now; session() materialises it lazily on first use.
            session = self._create_session(**kwargs)
        self._session = session
        self._sync_callback_adapter = threaded_callback
def __del__(self):
# TODO: Consider replacing this with a close method
if self._auto_created_session:
# aiohttp v3.0 has made ClientSession.close a coroutine,
# so we check whether it is one here and register it
# to run appropriately at exit
if asyncio.iscoroutinefunction(self._session.close):
asyncio.get_event_loop().run_until_complete(
self._session.close()
)
else:
self._session.close()
async def session(self):
"""Returns the underlying `aiohttp.ClientSession`."""
if isinstance(self._session, self.__ARG_SPEC):
args, kwargs = self._session
self._session = aiohttp.ClientSession(*args, **kwargs)
self._auto_created_session = True
return self._session
def wrap_callback(self, callback):
func = inspect.unwrap(callback)
if not asyncio.iscoroutinefunction(func):
callback = self._sync_callback_adapter(callback)
return callback
@staticmethod
@register.handler
def with_session(session, *args, **kwargs):
"""
Builds a client instance if the first argument is a
:py:class:`aiohttp.ClientSession`. Otherwise, return :py:obj:`None`.
"""
if isinstance(session, aiohttp.ClientSession):
return AiohttpClient(session, *args, **kwargs)
@classmethod
def _create_session(cls, *args, **kwargs):
return cls.__ARG_SPEC(args, kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Builds a client instance with
:py:class:`aiohttp.ClientSession` arguments.
Instead of directly initializing this class with a
:py:class:`aiohttp.ClientSession`, use this method to have the
client lazily construct a session when sending the first
request. Hence, this method guarantees that the creation of the
underlying session happens inside of a coroutine.
Args:
*args: positional arguments that
:py:class:`aiohttp.ClientSession` takes.
**kwargs: keyword arguments that
:py:class:`aiohttp.ClientSession` takes.
"""
session_build_args = cls._create_session(*args, **kwargs)
return AiohttpClient(session=session_build_args)
async def send(self, request):
method, url, extras = request
session = await self.session()
response = await session.request(method, url, **extras)
# Make `aiohttp` response "quack" like a `requests` response
response.status_code = response.status
return response
def apply_callback(self, callback, response):
return self.wrap_callback(callback)(response)
@staticmethod
def io():
return io.AsyncioStrategy()
class ThreadedCoroutine(object):
    """Callable adapter that runs a coroutine to completion on a shadow loop."""

    def __init__(self, coroutine):
        self._coroutine = coroutine

    def __call__(self, *args, **kwargs):
        # Each invocation spins up a temporary AsyncioExecutor, blocks on
        # the coroutine's result, and tears the executor down again.
        with AsyncioExecutor() as executor:
            return executor.submit(self._coroutine, *args, **kwargs).result()
class ThreadedResponse(object):
    """Proxy exposing a response's coroutine methods as blocking calls.

    Plain attributes pass straight through; coroutine functions are
    wrapped in :class:`ThreadedCoroutine` so synchronous code can call
    them directly.
    """

    def __init__(self, response):
        self._response = response

    def __getattr__(self, item):
        attr = getattr(self._response, item)
        if asyncio.iscoroutinefunction(attr):
            return ThreadedCoroutine(attr)
        return attr

    def unwrap(self):
        """Return the wrapped response object."""
        return self._response
class AsyncioExecutor(futures.Executor):
    """
    Executor that runs asyncio coroutines in a shadow thread.
    Credit to Vincent Michel, who wrote the original implementation:
    https://gist.github.com/vxgmichel/d16e66d1107a369877f6ef7e646ac2e5
    """
    def __init__(self):
        # A dedicated event loop owned by this executor, driven by a
        # background thread until shutdown() stops it.
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._target)
        self._thread.start()
    def _target(self):
        # Thread entry point: adopt the loop and block in run_forever().
        asyncio.set_event_loop(self._loop)
        self._loop.run_forever()
    def submit(self, fn, *args, **kwargs):
        # *fn* must be a coroutine function; the returned object is a
        # concurrent.futures.Future resolving to the coroutine's result.
        coro = fn(*args, **kwargs)
        return asyncio.run_coroutine_threadsafe(coro, self._loop)
    def shutdown(self, wait=True):
        # Stop the loop from its own thread; optionally join the worker.
        self._loop.call_soon_threadsafe(self._loop.stop)
        if wait:  # pragma: no cover
            self._thread.join()
# === Register client exceptions === #
# Map uplink's generic exception slots onto aiohttp's concrete exception
# types, so callers can catch transport errors in a client-agnostic way.
if aiohttp is not None:  # pragma: no cover
    AiohttpClient.exceptions.BaseClientException = aiohttp.ClientError
    AiohttpClient.exceptions.ConnectionError = aiohttp.ClientConnectionError
    AiohttpClient.exceptions.ConnectionTimeout = aiohttp.ClientConnectorError
    AiohttpClient.exceptions.ServerTimeout = aiohttp.ServerTimeoutError
    AiohttpClient.exceptions.SSLError = aiohttp.ClientSSLError
    AiohttpClient.exceptions.InvalidURL = aiohttp.InvalidURL
| {
"content_hash": "780b5d7c9a1d87e3bc1733d53b0d0ecc",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 77,
"avg_line_length": 33.1207729468599,
"alnum_prop": 0.6474620770128354,
"repo_name": "prkumar/uplink",
"id": "4d3a3f362caef6ab8445e3294f5b6efb61d8f94a",
"size": "6856",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uplink/clients/aiohttp_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "348057"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from unittest import mock
import pytest
from sklearn import datasets
import sklearn.neighbors as knn
import mlflow
import random
from mlflow import MlflowClient
from mlflow.entities.model_registry import ModelVersion
from mlflow.models import add_libraries_to_model
from mlflow.models.utils import get_model_version_from_model_uri
ModelWithData = namedtuple("ModelWithData", ["model", "inference_data"])
@pytest.fixture(scope="module")
def sklearn_knn_model():
    """Module-scoped fixture: a KNN classifier fit on two iris features."""
    iris = datasets.load_iris()
    features = iris.data[:, :2]  # restrict to the first two features
    labels = iris.target
    classifier = knn.KNeighborsClassifier()
    classifier.fit(features, labels)
    return ModelWithData(model=classifier, inference_data=features)
def random_int(lo=1, hi=1000000000):
    """Return a uniformly random integer in the inclusive range [lo, hi]."""
    return lo + random.randrange(hi - lo + 1)
def test_adding_libraries_to_model_default(sklearn_knn_model):
    """With no extra arguments, ``add_libraries_to_model`` logs the wheeled
    model under the currently active run and registers it as version 2 of
    the same model name."""
    model_name = f"wheels-test-{random_int()}"
    artifact_path = "model"
    model_uri = f"models:/{model_name}/1"
    wheeled_model_uri = f"models:/{model_name}/2"
    # Log a model
    with mlflow.start_run():
        run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        mlflow.sklearn.log_model(
            sk_model=sklearn_knn_model.model,
            artifact_path=artifact_path,
            registered_model_name=model_name,
        )
        wheeled_model_info = add_libraries_to_model(model_uri)
        assert wheeled_model_info.run_id == run_id
    # Verify new model version created
    wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
    assert wheeled_model_version.run_id == run_id
    assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_new_run(sklearn_knn_model):
    """When ``add_libraries_to_model`` runs inside a *different* active run,
    the wheeled model version is attached to that run, not the original."""
    model_name = f"wheels-test-{random_int()}"
    artifact_path = "model"
    model_uri = f"models:/{model_name}/1"
    wheeled_model_uri = f"models:/{model_name}/2"
    # Log a model
    with mlflow.start_run():
        original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        mlflow.sklearn.log_model(
            sk_model=sklearn_knn_model.model,
            artifact_path=artifact_path,
            registered_model_name=model_name,
        )
    with mlflow.start_run():
        wheeled_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        wheeled_model_info = add_libraries_to_model(model_uri)
        assert original_run_id != wheeled_run_id
        assert wheeled_model_info.run_id == wheeled_run_id
    # Verify new model version created
    wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
    assert wheeled_model_version.run_id == wheeled_run_id
    assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_run_id_passed(sklearn_knn_model):
    """An explicit ``run_id=`` argument wins even when no run is active:
    the wheeled model version is attached to the given (finished) run."""
    model_name = f"wheels-test-{random_int()}"
    artifact_path = "model"
    model_uri = f"models:/{model_name}/1"
    wheeled_model_uri = f"models:/{model_name}/2"
    # Log a model
    with mlflow.start_run():
        original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        mlflow.sklearn.log_model(
            sk_model=sklearn_knn_model.model,
            artifact_path=artifact_path,
            registered_model_name=model_name,
        )
    # Create an (empty) second run just to obtain its run id.
    with mlflow.start_run():
        wheeled_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        pass
    wheeled_model_info = add_libraries_to_model(model_uri, run_id=wheeled_run_id)
    assert original_run_id != wheeled_run_id
    assert wheeled_model_info.run_id == wheeled_run_id
    # Verify new model version created
    wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
    assert wheeled_model_version.run_id == wheeled_run_id
    assert wheeled_model_version.name == model_name
def test_adding_libraries_to_model_new_model_name(sklearn_knn_model):
    """Passing ``registered_model_name`` registers the wheeled model as
    version 1 of a brand-new name instead of bumping the original."""
    model_name = f"wheels-test-{random_int()}"
    wheeled_model_name = f"wheels-test-{random_int()}"
    artifact_path = "model"
    model_uri = f"models:/{model_name}/1"
    wheeled_model_uri = f"models:/{wheeled_model_name}/1"
    # Log a model
    with mlflow.start_run():
        mlflow.sklearn.log_model(
            sk_model=sklearn_knn_model.model,
            artifact_path=artifact_path,
            registered_model_name=model_name,
        )
    with mlflow.start_run():
        new_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        wheeled_model_info = add_libraries_to_model(
            model_uri, registered_model_name=wheeled_model_name
        )
        assert wheeled_model_info.run_id == new_run_id
    # Verify new model version created
    wheeled_model_version = get_model_version_from_model_uri(wheeled_model_uri)
    assert wheeled_model_version.run_id == new_run_id
    assert wheeled_model_version.name == wheeled_model_name
    assert wheeled_model_name != model_name
def test_adding_libraries_to_model_when_version_source_None(sklearn_knn_model):
    """When the registry returns a ModelVersion with no ``run_id`` (source
    unknown), ``add_libraries_to_model`` falls back to creating a fresh run."""
    model_name = f"wheels-test-{random_int()}"
    artifact_path = "model"
    model_uri = f"models:/{model_name}/1"
    # Log a model
    with mlflow.start_run():
        original_run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id
        mlflow.sklearn.log_model(
            sk_model=sklearn_knn_model.model,
            artifact_path=artifact_path,
            registered_model_name=model_name,
        )
    # A ModelVersion built without a source has run_id == None.
    model_version_without_source = ModelVersion(name=model_name, version=1, creation_timestamp=124)
    assert model_version_without_source.run_id is None
    with mock.patch.object(
        MlflowClient, "get_model_version", return_value=model_version_without_source
    ) as mlflow_client_mock:
        wheeled_model_info = add_libraries_to_model(model_uri)
        assert wheeled_model_info.run_id is not None
        assert wheeled_model_info.run_id != original_run_id
        mlflow_client_mock.assert_called_once_with(model_name, "1")
| {
"content_hash": "bdb2efe4b78055fe1fa87a248e1fa6db",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 99,
"avg_line_length": 36.39877300613497,
"alnum_prop": 0.674869374683971,
"repo_name": "mlflow/mlflow",
"id": "083bdf773fa632f1b5759b515196a2d7ac0a5459",
"size": "5933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/models/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24965"
},
{
"name": "Dockerfile",
"bytes": "1206"
},
{
"name": "HTML",
"bytes": "16439"
},
{
"name": "Java",
"bytes": "276538"
},
{
"name": "JavaScript",
"bytes": "3606345"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "6057051"
},
{
"name": "R",
"bytes": "202454"
},
{
"name": "Scala",
"bytes": "39353"
},
{
"name": "Shell",
"bytes": "27246"
},
{
"name": "TSQL",
"bytes": "211"
},
{
"name": "TypeScript",
"bytes": "313772"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import logging
import traceback
import sys
from StringIO import StringIO
from django import forms
from django.forms.models import modelform_factory
from django.conf import settings
from django.shortcuts import render_to_response
from lxml import etree
from piston.handler import BaseHandler
from piston import resource
from piston.utils import rc, validate#, decorator
from sana.mrs.models import Procedure, RequestLog
from sana.mrs.util import enable_logging
from sana.api.forms import *
from django.template import RequestContext
# Module-level DTD used to validate uploaded procedure XML. Stays None when
# the configured DTD file cannot be loaded.
dtd = None
try:
    # NOTE(review): bare ``except`` — ANY failure here (not only a missing
    # file) is logged as "does not exist". Uses the Python 2 ``file`` builtin.
    dtd = etree.DTD(file=file(settings.PROCEDURE_DTD,'r+b'))
except:
    logging.error("Procedure DTD file does not exist")
class XMLValidationHandler(BaseHandler):
    """Piston handler that validates posted procedure XML against the module
    DTD and renders any validation errors as HTML."""
    allowed_methods = ('GET', 'POST')
    model = Procedure
    exclude = ('created', 'modified',)
    #@validate
    def create(self,request):#,*args,**kwargs):
        # Expects the XML document in the ``text`` POST field; returns the
        # literal ok string on success, or an error listing rendered from
        # the DTD / parser error log.
        msg = dict(request.POST)
        try:
            xml = msg['text'][0]
            # NOTE(review): ``lines`` is computed but never used.
            lines = [x for x in xml.split("\n")]
            try:
                exml = etree.XML(str(xml))
                if dtd.validate(exml):
                    return "..........ok!"
                else:
                    # dtd.error_log entries stringify as colon-separated
                    # fields; index 1 is the line, index 6 the message.
                    error_list = [ { 'line': x[1],'message':x[6] } for x in [ str(y).split(":") for y in list(dtd.error_log)]]
                    return render_to_response("xml/errors.html",
                        {'error_list': error_list})
            except etree.LxmlError as e:
                # Parse failure: recover line/message from the exception
                # text, with a canned fallback when the message is empty.
                l = [x for x in str(e.message).split(",")] if (e.message and len(e.message)) > 0 else [u"Empty line",u"Add text", u"1", ]
                return render_to_response("xml/errors.html",
                    {'error_list': [ {'line': l[2].strip("line "), "message": l[0] + ", " +l[1]}]})
        except:
            # NOTE(review): bare except returning sys.exc_info() — callers
            # receive the raw exception triple instead of an HTTP error.
            return sys.exc_info()
    @enable_logging
    def read(self,request, uuid=None, m=None):
        """GET: list all procedures (``m`` truthy), return one procedure's
        raw XML (``uuid`` given), or a blank model form otherwise."""
        query = dict(request.GET)
        try:
            if m:
                # NOTE(review): ``manifest`` is assigned but never used.
                manifest = u'<manifest>'
                return Procedure.objects.all()
            elif uuid:
                p = Procedure.objects.filter(procedure_guid__exact=uuid)[0]
                with open(p.xml.path, 'rb') as f:
                    xml = f.read()
                return xml
            else:
                return modelform_factory(self.model)
        except:
            return "FAIL"
        # NOTE(review): unreachable — every path above returns.
        return "??????????"
class ManifestHandler(BaseHandler):
    """Read-only piston handler exposing stored Procedure XML."""
    allowed_methods = ('GET', 'POST')
    model = Procedure
    exclude = ('created', 'modified',)
    #@validate
    def create(self,request,*args,**kwargs):
        # POST is advertised in allowed_methods but not implemented.
        pass
    def read(self,request, uuid=None):
        """Return all procedures, or the raw XML of the one matching *uuid*."""
        try:
            if not uuid:
                # NOTE(review): ``manifest`` is assigned but never used.
                manifest = u'<manifest>'
                return Procedure.objects.all()
            else:
                p = Procedure.objects.filter(procedure_guid__exact=uuid)[0]
                with open(p.xml.path, 'rb') as f:
                    xml = f.read()
                return xml
        except:
            # NOTE(review): bare except — any failure (unknown uuid, IO
            # error) returns the raw exception triple, not an HTTP error.
            return sys.exc_info()
        #return rc.NOT_FOUND
class BaseDispatchHandler(BaseHandler):
    """
    Base handler for api model objects following basic CRUD approach using
    django-piston api. Extending classes must specify:
        model
            Sana api model class
        v_form
            ModelForm used for validating
    Additionally, extending classes may specify:
        allowed_methods
            CRUD methods which will be accepted
    """
    @validate('POST')
    def create(self,request):
        """ POST request method.
        Parameters:
            request
                a form based http POST request
        """
        new_obj = request.form.save()
        return render_to_response("sanctuary/form.html",
                          {'form':request.form },
                          RequestContext(request))
    @validate('GET')
    def read(self,request, *args, **kwargs):
        """ GET request method for retrieving existing records.
        Parameters:
            request
                an http GET request
            args
                slug for identifying a record
        """
        query = dict(request.REQUEST.items())
        # NOTE(review): this looks inverted — META is returned when 'meta'
        # is ABSENT from the query string; confirm the intended behavior.
        if not query.get('meta',0):
            return dict(request.META)
        context = RequestContext(request)
        try:
            klass = getattr(self.__class__, 'model')
            form_klass = getattr(self.__class__, 'v_form')
            if request.form.data:
                obj = klass.objects.get(**query)
                # BUG FIX: previously passed the *builtin* ``object`` type
                # (``instance=object``) instead of the fetched record, so
                # the form could never be bound to the stored data.
                form = form_klass(instance=obj)
            else:
                form = form_klass()
            return render_to_response("form.html",
                          {'form':form },
                          context_instance=context)
        except:
            # Fall back to an empty/request-bound form on any failure.
            typ, val, tb = sys.exc_info()
            return render_to_response("form.html",
                          {'form':request.form },
                          context_instance=context)
    @validate('PUT')
    def update(self,request):
        """ PUT request method for updating existing records.
        Parameters
            request
                an http PUT request
        """
        return render_to_response("form.html",
                          {'form':request.form },
                          RequestContext(request))
    @validate('DELETE')
    def delete(self,request, *args):
        """ DELETE request method for deleting existing records.
        Parameters
            request
                an http DELETE request
            args
                placeholder for slug used to identify record
        """
        return render_to_response("form.html",
                          {'form':request.form },
                          RequestContext(request))
class RequestLogHandler(BaseDispatchHandler):
    """Renders a paginated full-page view of RequestLog entries, newest first."""
    # FIX: ('GET') is just the string 'GET'; a one-element tuple is intended.
    allowed_methods = ('GET',)
    model = RequestLog
    v_form = RequestLogForm
    def read(self, request, *args, **kwargs):
        """GET handler.

        Query parameters:
            page       1-based page number (default 1)
            page_size  entries per page (default 20)
            refresh    refresh rate; parsed but currently not passed on
        """
        query = dict(request.GET.items())
        page = int(query.get('page', 1))
        page_size = int(query.get('page_size', 20))
        rate = int(query.get('refresh', 5))  # parsed for validation; unused below
        log_list = RequestLog.objects.all().order_by('-timestamp')
        log_count = log_list.count()
        # BUG FIX: ceiling division plus an inclusive range so the final
        # partial page is reachable. The old code computed
        # ``int(count / size) + 1`` and ``range(1, page_count)``, which
        # dropped the last page whenever count was not a multiple of size.
        page_count = (log_count + page_size - 1) // page_size
        if log_count > page_size:
            page_range = range(1, page_count + 1)
            object_list = log_list[(page - 1) * page_size:page * page_size]
        else:
            page_range = range(0, 1)
            object_list = log_list
        return render_to_response('logging/index.html',
                                  {'object_list': object_list,
                                   'page_range': page_range,
                                   'page_size': page_size,
                                   'page': page})
class RequestLogTableHandler(BaseDispatchHandler):
    """Renders a paginated table fragment of RequestLog entries, newest first."""
    # FIX: ('GET') is just the string 'GET'; a one-element tuple is intended.
    allowed_methods = ('GET',)
    model = RequestLog
    v_form = RequestLogForm
    def read(self, request, *args, **kwargs):
        """GET handler.

        Query parameters:
            page       1-based page number (default 1)
            page_size  entries per page (default 20)
            refresh    refresh rate; parsed but currently not passed on
        """
        query = dict(request.GET.items())
        page_size = int(query.get('page_size', 20))
        page = int(query.get('page', 1))
        rate = int(query.get('refresh', 5))  # parsed for validation; unused below
        log_list = RequestLog.objects.all().order_by('-timestamp')
        log_count = log_list.count()
        # BUG FIX: ceiling division plus an inclusive range so the final
        # partial page is reachable (the old ``range(1, page_count)`` with
        # ``int(count / size) + 1`` dropped the last partial page).
        page_count = (log_count + page_size - 1) // page_size
        if log_count > page_size:
            page_range = range(1, page_count + 1)
            object_list = log_list[(page - 1) * page_size:page * page_size]
        else:
            page_range = range(0, 1)
            object_list = log_list
        return render_to_response('logging/list.html',
                                  {'object_list': object_list,
                                   'page_range': page_range,
                                   'page_size': page_size,
                                   'page': page,})
| {
"content_hash": "a398133cac7980555e72900d36cad253",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 138,
"avg_line_length": 34.20816326530612,
"alnum_prop": 0.5263095096050591,
"repo_name": "SanaMobile/middleware_mds_v1",
"id": "b8f1a01fbbe91ee4626e862ffaab8d75fa425b11",
"size": "8381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mds/api/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "591"
},
{
"name": "Python",
"bytes": "162057"
},
{
"name": "Shell",
"bytes": "1092"
}
],
"symlink_target": ""
} |
'''
onshape_api, based on onshape "apikey"
======
Demonstrates usage of API keys for the Onshape REST API.

Exposes the ``onshape``, ``client`` and ``utils`` submodules (see
``__all__`` below).
'''
__copyright__ = 'Copyright (c) 2016 Onshape, Inc.'
__license__ = 'All rights reserved.'
__title__ = 'onshape_api'
__all__ = ['onshape', 'client', 'utils']
| {
"content_hash": "2db19d70601bac798af7171300325ef0",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 55,
"avg_line_length": 24.272727272727273,
"alnum_prop": 0.6329588014981273,
"repo_name": "Rhoban/onshape-to-robot",
"id": "6d3966ec74eec53890e0f4257f2cd922cb37a1f1",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onshape_to_robot/onshape_api/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "109287"
}
],
"symlink_target": ""
} |
import math
import sys
import pyopencl as cl
import numpy
from PIL import Image
from log import logCall
@logCall
def mergeImages(images, clContext, clQueue):
    """Concatenate equally-sized 2D images side-by-side on the GPU.

    Returns a uint8 array of shape (height, width * len(images)); the
    i-th horizontal slot receives the pixels of images[i].
    Assumes every image shares images[0]'s shape — TODO confirm.
    """
    # Compile the OpenCL kernel once and cache it on the function object.
    if not hasattr(mergeImages, "program"):
        kernelFile = open('src/kernels/mergeImages.cl', 'r')
        mergeImages.program = cl.Program(clContext, kernelFile.read()).build()
        kernelFile.close()
    mf = cl.mem_flags
    # Output buffer sized for all images laid out horizontally.
    output = numpy.zeros((images[0].shape[0], images[0].shape[1] * len(images))).astype(numpy.uint8)
    outputBuffer = cl.Buffer(clContext, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=output)
    for currentImage in range(len(images)):
        image = images[currentImage]
        imageBuffer = cl.Buffer(clContext, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=image)
        # Launch one work-item per pixel of a single input image; the kernel
        # receives (width, height, image count, slot index) as uint32 args.
        mergeImages.program.mergeImages(clQueue, [images[0].size], None, imageBuffer, outputBuffer, numpy.uint32(images[0].shape[1]), numpy.uint32(images[0].shape[0]), numpy.uint32(len(images)), numpy.uint32(currentImage)).wait()
    # Copy the merged result back to host memory.
    cl.enqueue_read_buffer(clQueue, outputBuffer, output).wait()
    return output
| {
"content_hash": "a2e3468c18659aaf8b57f7740f1fa494",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 229,
"avg_line_length": 35.9,
"alnum_prop": 0.7047353760445683,
"repo_name": "hortont424/contrasty",
"id": "9fe193477a0ea3e4ebbf9f4d0fdafa638886d014",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/filters/merge.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Common Lisp",
"bytes": "5484"
},
{
"name": "Python",
"bytes": "19519"
},
{
"name": "Shell",
"bytes": "228"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
from django.dispatch import receiver
from django.db.models.signals import post_save
from ..models import Entry
@receiver(post_save, sender=Entry)
def on_entry_created(sender, instance, created, **kwargs):
    """Push update notifications for every freshly created Entry.

    Saves of pre-existing entries are ignored.
    """
    if created:
        instance.send_updates()
| {
"content_hash": "d530484e3fd08fc636f999630c35aec8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 23.571428571428573,
"alnum_prop": 0.7424242424242424,
"repo_name": "conikuvat/shoottikala",
"id": "1317d6e66e59de6e5e033d69a30cf9d212b716f1",
"size": "349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "event_log/handlers/entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "865"
},
{
"name": "HTML",
"bytes": "17524"
},
{
"name": "Python",
"bytes": "81008"
},
{
"name": "Shell",
"bytes": "4815"
}
],
"symlink_target": ""
} |
import urllib
from lxml import etree
from tempest.common import http
from tempest.common import rest_client
from tempest.common import xml_utils as common
from tempest import config
CONF = config.CONF
XMLNS = "http://docs.openstack.org/identity/api/v3"
class RegionClientXML(rest_client.RestClient):
    """XML-serialization client for the Keystone v3 ``regions`` API."""
    TYPE = "xml"
    def __init__(self, auth_provider):
        super(RegionClientXML, self).__init__(auth_provider)
        self.service = CONF.identity.catalog_type
        self.region_url = 'adminURL'
        self.api_version = "v3"
    def _parse_array(self, node):
        # Collect every <region> child of *node* as a JSON-like dict.
        array = []
        for child in node.getchildren():
            tag_list = child.tag.split('}', 1)  # strip the XML namespace prefix
            if tag_list[1] == "region":
                array.append(common.xml_to_json(child))
        return array
    def _parse_body(self, body):
        # Convert a single XML element tree into a JSON-like dict.
        json = common.xml_to_json(body)
        return json
    def request(self, method, url, extra_headers=False, headers=None,
                body=None, wait=None):
        """Overriding the existing HTTP request in super class RestClient."""
        if extra_headers:
            try:
                headers.update(self.get_headers())
            except (ValueError, TypeError):
                # headers was None/unusable; fall back to defaults only.
                headers = self.get_headers()
        # Rebuild the HTTP object each call, honoring the SSL-validation
        # toggle from configuration.
        dscv = CONF.identity.disable_ssl_certificate_validation
        self.http_obj = http.ClosingHttp(
            disable_ssl_certificate_validation=dscv)
        return super(RegionClientXML, self).request(method, url,
                                                    extra_headers,
                                                    headers=headers,
                                                    body=body)
    def create_region(self, description, **kwargs):
        """Create region.

        If ``unique_region_id`` is supplied the region is PUT at that id;
        otherwise it is POSTed and the server assigns one.
        ``parent_region_id`` is forwarded when given.
        """
        create_region = common.Element("region",
                                       xmlns=XMLNS,
                                       description=description)
        if 'parent_region_id' in kwargs:
            create_region.append(common.Element(
                'parent_region_id', kwargs.get('parent_region_id')))
        if 'unique_region_id' in kwargs:
            resp, body = self.put(
                'regions/%s' % kwargs.get('unique_region_id'),
                str(common.Document(create_region)))
        else:
            resp, body = self.post('regions',
                                   str(common.Document(create_region)))
        body = self._parse_body(etree.fromstring(body))
        return resp, body
    def update_region(self, region_id, **kwargs):
        """Updates an region with given parameters.

        Supported kwargs: ``description``, ``parent_region_id``.
        """
        description = kwargs.get('description', None)
        update_region = common.Element("region",
                                       xmlns=XMLNS,
                                       description=description)
        if 'parent_region_id' in kwargs:
            update_region.append(common.Element('parent_region_id',
                                 kwargs.get('parent_region_id')))
        resp, body = self.patch('regions/%s' % str(region_id),
                                str(common.Document(update_region)))
        body = self._parse_body(etree.fromstring(body))
        return resp, body
    def get_region(self, region_id):
        """Get Region."""
        url = 'regions/%s' % region_id
        resp, body = self.get(url)
        body = self._parse_body(etree.fromstring(body))
        return resp, body
    def list_regions(self, params=None):
        """Get the list of regions.

        *params*, when given, is urlencoded onto the query string.
        """
        url = 'regions'
        if params:
            url += '?%s' % urllib.urlencode(params)
        resp, body = self.get(url)
        body = self._parse_array(etree.fromstring(body))
        return resp, body
    def delete_region(self, region_id):
        """Delete region."""
        resp, body = self.delete('regions/%s' % region_id)
        return resp, body
| {
"content_hash": "6a8f97c06572f234c097bed5d897a1f5",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 37.095238095238095,
"alnum_prop": 0.5427471116816431,
"repo_name": "Mirantis/tempest",
"id": "f854138bd5aedd46deb126a2e3f89f71c64048d1",
"size": "4551",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/services/identity/v3/xml/region_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
} |
"""File operations for User authorization."""
import base64
import hmac
import secrets
# import random
from datetime import datetime, timedelta
from hashlib import sha224
from random import randrange
from uuid import uuid4

from sqlalchemy import exists
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.session import Session
from werkzeug.local import LocalProxy

from hydrus.data.exceptions import UserExists, UserNotFound
from hydrus.data.db_models import User, Token, Nonce
def add_user(id_: int, paraphrase: str, session: Session) -> None:
    """Add a new user to the database, storing a SHA-224 of the paraphrase.

    Raises:
        UserExists: If a user with `id_` already exists.
    """
    already_present = session.query(exists().where(User.id == id_)).scalar()
    if already_present:
        raise UserExists(id_=id_)
    digest = sha224(paraphrase.encode('utf-8')).hexdigest()
    session.add(User(id=id_, paraphrase=digest))
    session.commit()
def check_nonce(request: LocalProxy, session: Session) -> bool:
    """check validity of nonce passed by the user.

    Reads the nonce id from the ``X-Authentication`` header, consumes it
    (single use), and accepts it only if issued within the last minute.
    """
    try:
        id_ = request.headers['X-Authentication']
        nonce = session.query(Nonce).filter(Nonce.id == id_).one()
        present = datetime.now()
        present = present - nonce.timestamp
        # The nonce is deleted regardless of whether it is still fresh.
        session.delete(nonce)
        session.commit()
        # timedelta(days, seconds, microseconds, milliseconds, minutes, ...)
        # -> this is a one-minute validity window.
        if present > timedelta(0, 0, 0, 0, 1, 0, 0):
            return False
    except BaseException:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit; ``except Exception`` would be safer here.
        return False
    return True
def create_nonce(session: Session) -> str:
    """
    Create a one time use nonce valid for a short time
    for user authentication.
    """
    value = str(uuid4())
    session.add(Nonce(id=value, timestamp=datetime.now()))
    session.commit()
    return value
def add_token(request: LocalProxy, session: Session) -> str:
    """
    Create a new token for the user or return a
    valid existing token to the user.

    Tokens are 30 hex characters. SECURITY FIX: they are now generated
    with the cryptographically strong ``secrets`` module instead of
    ``random.randrange`` (predictable Mersenne Twister output), while
    keeping the same 30-hex-character format.
    """
    id_ = int(request.authorization['username'])
    try:
        token = session.query(Token).filter(Token.user_id == id_).one()
    except NoResultFound:
        # First token for this user.
        new_id = secrets.token_hex(15)  # 15 bytes -> 30 hex chars
        session.add(Token(user_id=id_, id=new_id))
        session.commit()
        return new_id
    if not token.is_valid():
        # Rotate an expired token in place.
        token.id = secrets.token_hex(15)
        token.timestamp = datetime.now()
        session.commit()
    return token.id
def check_token(request: LocalProxy, session: Session) -> bool:
    """
    Check validity of the token passed by the user.

    Reads the token id from the ``X-Authorization`` header; expired
    tokens are removed from the database and rejected.
    """
    try:
        id_ = request.headers['X-Authorization']
        token = session.query(Token).filter(Token.id == id_).one()
        if not token.is_valid():
            # BUG FIX: SQLAlchemy ORM instances have no ``.delete()`` —
            # the old ``token.delete()`` raised AttributeError (silently
            # swallowed below), so expired tokens were never purged.
            session.delete(token)
            session.commit()
            return False
    except BaseException:
        # NOTE(review): kept for parity with check_nonce, but
        # ``except Exception`` would be safer.
        return False
    return True
def generate_basic_digest(id_: int, paraphrase: str) -> str:
    """Create the digest to be added to the HTTP Authorization header.

    The digest is base64("<id>:<sha224(paraphrase)>").
    """
    hashed = sha224(paraphrase.encode('utf-8')).hexdigest()
    raw = '{}:{}'.format(id_, hashed).encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')
def authenticate_user(id_: int, paraphrase: str, session: Session) -> bool:
    """Authenticate a user based on the ID and his paraphrase.

    Raises:
        UserNotFound: If a user with `id_` is not a valid/defined User
    """
    try:
        user = session.query(User).filter(User.id == id_).one()
    except NoResultFound:
        raise UserNotFound(id_=id_)
    generated_hash = sha224(paraphrase.encode('utf-8')).hexdigest()
    # SECURITY FIX: hmac.compare_digest compares in constant time, so the
    # comparison itself leaks no timing information (unlike plain ``==``).
    return hmac.compare_digest(generated_hash, user.paraphrase)
def check_authorization(request: LocalProxy, session: Session) -> bool:
    """Check if the request object has the correct authorization.

    The nonce must be valid first; only then are the Basic-auth
    credentials verified.
    """
    if not check_nonce(request, session):
        return False
    credentials = request.authorization
    return authenticate_user(credentials.username, credentials.password, session)
| {
"content_hash": "4c24333f9d7b9a753a57bc53f8d65ed1",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 75,
"avg_line_length": 30.916666666666668,
"alnum_prop": 0.6439598137711345,
"repo_name": "xadahiya/hydrus",
"id": "bb2cf7b91c8b5310102a2c9d906f6026c0ad3145",
"size": "4081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hydrus/data/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "444"
},
{
"name": "Python",
"bytes": "223572"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Adds ``ResultsIndicator``, a proxy over ``indicators.Indicator``.

    A proxy model creates no database table — note ``'proxy': True`` and
    the empty ``fields`` list — so this migration only records model state.
    """
    dependencies = [
        ('indicators', '0043_indicator_setup_remove_asterisks'),
    ]
    operations = [
        migrations.CreateModel(
            name='ResultsIndicator',
            fields=[
            ],
            options={
                'proxy': True,
                'indexes': [],
            },
            bases=('indicators.indicator',),
        ),
    ]
| {
"content_hash": "0365e815830b42e1924477e5f231b0cc",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 64,
"avg_line_length": 21.47826086956522,
"alnum_prop": 0.5080971659919028,
"repo_name": "mercycorps/TolaActivity",
"id": "3377c69c1f2bbe6191bf475c5e5391acc8e47923",
"size": "567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indicators/migrations/0044_resultsindicator_proxy_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
from datetime import datetime
from django.core.exceptions import ValidationError
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.core.urlresolvers import reverse
from django.template.defaultfilters import truncatewords, slugify
from django.template.loader import render_to_string
from django.db import models
from uuidfield import UUIDField
from tinyblog.utils import (
get_from_email,
get_site_domain,
tinyblog_bleach
)
class CurrentSubscribersManager(models.Manager):
    """Manager limited to subscribers who confirmed and never unsubscribed."""

    def get_queryset(self):
        base = super(CurrentSubscribersManager, self).get_queryset()
        return base.filter(confirmed=True, unsubscribed=False)
class EmailSubscriber(models.Model):
    """A (possibly not-yet-confirmed) e-mail subscription to the blog."""
    email = models.EmailField()
    # NOTE(review): auto_now=True updates this on EVERY save, not only at
    # sign-up time — confirm that is intended.
    subscribed = models.DateTimeField(auto_now=True)
    confirmed = models.BooleanField(default=False)
    unsubscribed = models.BooleanField(default=False)
    # Used for showing the e-mail address after initially signing up
    uuid_first = UUIDField(auto=True, verbose_name=u'Sign-up Key')
    # Used for confirming that this e-mail address is genuine
    uuid_second = UUIDField(auto=True, verbose_name=u'Confirmation Key')
    objects = models.Manager()
    # Only confirmed, not-unsubscribed entries (see CurrentSubscribersManager).
    current_objects = CurrentSubscribersManager()
    def __unicode__(self):
        return self.email
    def confirm_url(self):
        # Absolute URL the subscriber visits to confirm the address.
        relative_url = reverse('tinyblog_subscribe_confirm',
                               args=[self.uuid_second, ])
        return u'http://{0}{1}'.format(get_site_domain(), relative_url)
    def unsubscribe_url(self):
        # Absolute URL of the generic unsubscribe page.
        relative_url = reverse('tinyblog_unsubscribe')
        return u'http://{0}{1}'.format(get_site_domain(), relative_url)
class PublishedPostManager(models.Manager):
    """Manager restricted to posts whose publication time has passed."""

    def get_queryset(self):
        qs = super(PublishedPostManager, self).get_queryset()
        return qs.filter(created__lte=datetime.now())
class Post(models.Model):
    """A blog post with teaser/body HTML, e-mailed once to subscribers."""
    title = models.CharField(max_length=200)
    slug = models.SlugField(unique_for_month='created')
    # ``created`` doubles as the publication time (see published()).
    created = models.DateTimeField()
    updated = models.DateTimeField(auto_now=True)
    teaser_html = models.TextField(verbose_name='Teaser')
    text_html = models.TextField(verbose_name='Main text')
    # Set once mail_subscribers() has been run for this post.
    emailed = models.BooleanField(default=False)
    objects = models.Manager()
    published_objects = PublishedPostManager()
    def __unicode__(self):
        return self.title
    def get_absolute_url(self):
        return reverse('tinyblog_post',
                       args=[self.created.year,
                             '%02d' % self.created.month,
                             self.slug])
    def clean(self):
        # Enforce that the slug is already in canonical slugified form
        # (lowercase letters, digits, hyphens).
        if slugify(self.slug) != self.slug:
            raise ValidationError('Slugs must only contain lowercase '
                                  'characters, numbers and hyphens.')
    def bleached_teaser(self):
        # HTML-sanitized teaser, safe for rendering.
        return tinyblog_bleach(self.teaser_html)
    def bleached_text(self):
        return tinyblog_bleach(self.text_html)
    def get_teaser(self):
        """Return the explicit teaser, or a 100-word excerpt of the body."""
        if self.teaser_html:
            return self.teaser_html
        # NOTE(review): truncatewords on raw HTML may cut inside markup.
        return truncatewords(self.text_html, 100)
    def full_text(self):
        return self.teaser_html + u'\n' + self.text_html
    def bleached_full_text(self):
        return tinyblog_bleach(self.full_text())
    def published(self):
        # A post counts as published once its creation time has passed.
        return self.created <= datetime.now()
    def generate_mail(self, subscriber, domain):
        """Build the multipart (text + HTML) notification for *subscriber*."""
        text_content = render_to_string('tinyblog/emails/blog_post.txt',
                                        {'user': subscriber,
                                         'site': domain,
                                         'post': self})
        html_content = render_to_string('tinyblog/emails/blog_post.html',
                                        {'user': subscriber,
                                         'site': domain,
                                         'post': self})
        msg = EmailMultiAlternatives(self.title, text_content,
                                     get_from_email(),
                                     [subscriber.email, ])
        msg.attach_alternative(html_content, "text/html")
        return msg
    def mail_subscribers(self):
        """Send this post to all current subscribers, one mail per address.

        Returns the set of addresses actually mailed (duplicates skipped).
        """
        mail_queue = []
        subscribers = EmailSubscriber.current_objects.all()
        domain = get_site_domain()
        seen_subscribers = set()
        for subscriber in subscribers:
            # De-duplicate: the same address may have several subscriber rows.
            if subscriber.email in seen_subscribers:
                continue
            mail_queue.append(self.generate_mail(subscriber, domain))
            seen_subscribers.add(subscriber.email)
        # Reuse a single SMTP connection for the whole batch.
        connection = mail.get_connection()
        connection.open()
        connection.send_messages(mail_queue)
        connection.close()
        return seen_subscribers
    @classmethod
    def get_next_post_to_email(cls):
        """Return the oldest published, not-yet-emailed post.

        Raises Post.DoesNotExist when nothing is pending.
        """
        posts = Post.published_objects.order_by('created')
        posts = posts.filter(emailed=False).all()[:1]
        if not posts:
            raise Post.DoesNotExist
        return posts[0]
    class Meta:
        ordering = ['-created']
| {
"content_hash": "715236b690a9406188e4e8d3d7323d6d",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 77,
"avg_line_length": 32.87341772151899,
"alnum_prop": 0.6074316519060454,
"repo_name": "dominicrodger/tinyblog",
"id": "c8c2be54de7a43ce5c546c68bf68bc5500b2cf8c",
"size": "5194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tinyblog/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6445"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "Python",
"bytes": "52339"
}
],
"symlink_target": ""
} |
"""
Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import os
import types
import socket
import sys
import operator
try:
import fcntl
except ImportError:
fcntl = None
from zope.interface import implements, classImplements
try:
from OpenSSL import SSL
except ImportError:
SSL = None
from twisted.python.runtime import platformType
if platformType == 'win32':
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
EPERM = object()
from errno import WSAEINVAL as EINVAL
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
from errno import WSAEINPROGRESS as EINPROGRESS
from errno import WSAEALREADY as EALREADY
from errno import WSAECONNRESET as ECONNRESET
from errno import WSAEISCONN as EISCONN
from errno import WSAENOTCONN as ENOTCONN
from errno import WSAEINTR as EINTR
from errno import WSAENOBUFS as ENOBUFS
from errno import WSAEMFILE as EMFILE
# No such thing as WSAENFILE, either.
ENFILE = object()
# Nor ENOMEM
ENOMEM = object()
EAGAIN = EWOULDBLOCK
from errno import WSAECONNRESET as ECONNABORTED
from twisted.python.win32 import formatError as strerror
else:
from errno import EPERM
from errno import EINVAL
from errno import EWOULDBLOCK
from errno import EINPROGRESS
from errno import EALREADY
from errno import ECONNRESET
from errno import EISCONN
from errno import ENOTCONN
from errno import EINTR
from errno import ENOBUFS
from errno import EMFILE
from errno import ENFILE
from errno import ENOMEM
from errno import EAGAIN
from errno import ECONNABORTED
from os import strerror
from errno import errorcode
# Twisted Imports
from twisted.internet import defer, base, address
from twisted.python import log, failure, reflect
from twisted.python.util import unsignedID
from twisted.internet.error import CannotListenError
from twisted.internet import abstract, main, interfaces, error
class _SocketCloser:
_socketShutdownMethod = 'shutdown'
def _closeSocket(self):
# socket.close() doesn't *really* close if there's another reference
# to it in the TCP/IP stack, e.g. if it was was inherited by a
# subprocess. And we really do want to close the connection. So we
# use shutdown() instead, and then close() in order to release the
# filedescriptor.
skt = self.socket
try:
getattr(skt, self._socketShutdownMethod)(2)
except socket.error:
pass
try:
skt.close()
except socket.error:
pass
class _TLSMixin:
    """
    Mixed into a L{Connection} subclass by L{_getTLSClass} once TLS has
    been started on a transport.  Wraps the read/write event handlers so
    that OpenSSL's WantRead/WantWrite conditions (e.g. during
    renegotiation) and clean TLS shutdown are handled transparently.
    """
    _socketShutdownMethod = 'sock_shutdown'

    # Set while OpenSSL reports that a write (resp. read) cannot make
    # progress until data has been read from (resp. written to) the peer.
    writeBlockedOnRead = 0
    readBlockedOnWrite = 0
    # What the transport's user last asked for, so the real read/write
    # state can be restored once a WantRead/WantWrite episode ends
    # (see _resetReadWrite).
    _userWantRead = _userWantWrite = True

    def getPeerCertificate(self):
        """Return the peer's TLS certificate as reported by OpenSSL."""
        return self.socket.get_peer_certificate()

    def doRead(self):
        """Read TLS application data, translating OpenSSL's exceptions
        into reactor-level connection state."""
        if self.disconnected:
            # See the comment in the similar check in doWrite below.
            # Additionally, in order for anything other than returning
            # CONNECTION_DONE here to make sense, it will probably be necessary
            # to implement a way to switch back to TCP from TLS (actually, if
            # we did something other than return CONNECTION_DONE, that would be
            # a big part of implementing that feature).  In other words, the
            # expectation is that doRead will be called when self.disconnected
            # is True only when the connection has been lost.  It's possible
            # that the other end could stop speaking TLS and then send us some
            # non-TLS data.  We'll end up ignoring that data and dropping the
            # connection.  There's no unit tests for this check in the cases
            # where it makes a difference.  The test suite only hits this
            # codepath when it would have otherwise hit the SSL.ZeroReturnError
            # exception handler below, which has exactly the same behavior as
            # this conditional.  Maybe that's the only case that can ever be
            # triggered, I'm not sure.  -exarkun
            return main.CONNECTION_DONE
        if self.writeBlockedOnRead:
            # The blocked write can now make progress; restore the
            # user-requested read/write state.
            self.writeBlockedOnRead = 0
            self._resetReadWrite()
        try:
            return Connection.doRead(self)
        except SSL.ZeroReturnError:
            # Clean TLS shutdown from the peer.
            return main.CONNECTION_DONE
        except SSL.WantReadError:
            return
        except SSL.WantWriteError:
            # OpenSSL needs to write before it can read again (e.g.
            # renegotiation); watch for writability instead.
            self.readBlockedOnWrite = 1
            Connection.startWriting(self)
            Connection.stopReading(self)
            return
        except SSL.SysCallError, (retval, desc):
            if ((retval == -1 and desc == 'Unexpected EOF')
                or retval > 0):
                return main.CONNECTION_LOST
            log.err()
            return main.CONNECTION_LOST
        except SSL.Error, e:
            return e

    def doWrite(self):
        """Write buffered data, handling the deferred-disconnect and
        read-blocked-on-write TLS states first."""
        # Retry disconnecting
        if self.disconnected:
            # This case is triggered when "disconnected" is set to True by a
            # call to _postLoseConnection from FileDescriptor.doWrite (to which
            # we upcall at the end of this overridden version of that API).  It
            # means that while, as far as any protocol connected to this
            # transport is concerned, the connection no longer exists, the
            # connection *does* actually still exist.  Instead of closing the
            # connection in the overridden _postLoseConnection, we probably
            # tried (and failed) to send a TLS close alert.  The TCP connection
            # is still up and we're waiting for the socket to become writeable
            # enough for the TLS close alert to actually be sendable.  Only
            # then will the connection actually be torn down. -exarkun
            return self._postLoseConnection()
        if self._writeDisconnected:
            return self._closeWriteConnection()

        if self.readBlockedOnWrite:
            # The blocked read can now make progress; restore the
            # user-requested read/write state.
            self.readBlockedOnWrite = 0
            self._resetReadWrite()
        return Connection.doWrite(self)

    def writeSomeData(self, data):
        """Write as much of C{data} as possible, translating OpenSSL's
        exceptions into reactor-level connection state."""
        try:
            return Connection.writeSomeData(self, data)
        except SSL.WantWriteError:
            return 0
        except SSL.WantReadError:
            # OpenSSL needs to read before it can write (renegotiation);
            # watch for readability instead.
            self.writeBlockedOnRead = 1
            Connection.stopWriting(self)
            Connection.startReading(self)
            return 0
        except SSL.ZeroReturnError:
            return main.CONNECTION_LOST
        except SSL.SysCallError, e:
            if e[0] == -1 and data == "":
                # errors when writing empty strings are expected
                # and can be ignored
                return 0
            else:
                return main.CONNECTION_LOST
        except SSL.Error, e:
            return e

    def _postLoseConnection(self):
        """
        Gets called after loseConnection(), after buffered data is sent.

        We try to send an SSL shutdown alert, but if it doesn't work, retry
        when the socket is writable.
        """
        # Here, set "disconnected" to True to trick higher levels into thinking
        # the connection is really gone.  It's not, and we're not going to
        # close it yet.  Instead, we'll try to send a TLS close alert to shut
        # down the TLS connection cleanly.  Only after we actually get the
        # close alert into the socket will we disconnect the underlying TCP
        # connection.
        self.disconnected = True
        if hasattr(self.socket, 'set_shutdown'):
            # If possible, mark the state of the TLS connection as having
            # already received a TLS close alert from the peer.  Why do
            # this???
            self.socket.set_shutdown(SSL.RECEIVED_SHUTDOWN)
        return self._sendCloseAlert()

    def _sendCloseAlert(self):
        # Okay, *THIS* is a bit complicated.

        # Basically, the issue is, OpenSSL seems to not actually return
        # errors from SSL_shutdown.  Therefore, the only way to
        # determine if the close notification has been sent is by
        # SSL_shutdown returning "done".  However, it will not claim it's
        # done until it's both sent *and* received a shutdown notification.

        # I don't actually want to wait for a received shutdown
        # notification, though, so, I have to set RECEIVED_SHUTDOWN
        # before calling shutdown.  Then, it'll return True once it's
        # *SENT* the shutdown.

        # However, RECEIVED_SHUTDOWN can't be left set, because then
        # reads will fail, breaking half close.

        # Also, since shutdown doesn't report errors, an empty write call is
        # done first, to try to detect if the connection has gone away.
        # (*NOT* an SSL_write call, because that fails once you've called
        # shutdown)
        try:
            os.write(self.socket.fileno(), '')
        except OSError, se:
            if se.args[0] in (EINTR, EWOULDBLOCK, ENOBUFS):
                return 0
            # Write error, socket gone
            return main.CONNECTION_LOST

        try:
            if hasattr(self.socket, 'set_shutdown'):
                laststate = self.socket.get_shutdown()
                self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
                done = self.socket.shutdown()
                if not (laststate & SSL.RECEIVED_SHUTDOWN):
                    # Restore the genuine shutdown state (see the long
                    # comment above): only SENT_SHUTDOWN is true so far.
                    self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
            else:
                #warnings.warn("SSL connection shutdown possibly unreliable, "
                #              "please upgrade to ver 0.XX", category=UserWarning)
                self.socket.shutdown()
                done = True
        except SSL.Error, e:
            return e

        if done:
            self.stopWriting()
            # Note that this is tested for by identity below.
            return main.CONNECTION_DONE
        else:
            # For some reason, the close alert wasn't sent.  Start writing
            # again so that we'll get another chance to send it.
            self.startWriting()
            # On Linux, select will sometimes not report a closed file
            # descriptor in the write set (in particular, it seems that if a
            # send() fails with EPIPE, the socket will not appear in the write
            # set).  The shutdown call above (which calls down to SSL_shutdown)
            # may have swallowed a write error.  Therefore, also start reading
            # so that if the socket is closed we will notice.  This doesn't
            # seem to be a problem for poll (because poll reports errors
            # separately) or with select on BSD (presumably because, unlike
            # Linux, it doesn't implement select in terms of poll and then map
            # POLLHUP to select's in fd_set).
            self.startReading()
            return None

    def _closeWriteConnection(self):
        """Send a close alert; only half-close the TCP side once it is
        actually out the door."""
        result = self._sendCloseAlert()

        if result is main.CONNECTION_DONE:
            return Connection._closeWriteConnection(self)

        return result

    def startReading(self):
        """Record that the user wants reads; defer if a write is
        currently blocked on reading."""
        self._userWantRead = True
        if not self.readBlockedOnWrite:
            return Connection.startReading(self)

    def stopReading(self):
        """Record that the user wants no reads; defer if a write is
        currently blocked on reading."""
        self._userWantRead = False
        if not self.writeBlockedOnRead:
            return Connection.stopReading(self)

    def startWriting(self):
        """Record that the user wants writes; defer if a read is
        currently blocked on writing."""
        self._userWantWrite = True
        if not self.writeBlockedOnRead:
            return Connection.startWriting(self)

    def stopWriting(self):
        """Record that the user wants no writes; defer if a read is
        currently blocked on writing."""
        self._userWantWrite = False
        if not self.readBlockedOnWrite:
            return Connection.stopWriting(self)

    def _resetReadWrite(self):
        # After changing readBlockedOnWrite or writeBlockedOnRead,
        # call this to reset the state to what the user requested.
        if self._userWantWrite:
            self.startWriting()
        else:
            self.stopWriting()

        if self._userWantRead:
            self.startReading()
        else:
            self.stopReading()
class _TLSDelayed(object):
"""
State tracking record for TLS startup parameters. Used to remember how
TLS should be started when starting it is delayed to wait for the output
buffer to be flushed.
@ivar bufferedData: A C{list} which contains all the data which was
written to the transport after an attempt to start TLS was made but
before the buffers outstanding at that time could be flushed and TLS
could really be started. This is appended to by the transport's
write and writeSequence methods until it is possible to actually
start TLS, then it is written to the TLS-enabled transport.
@ivar context: An SSL context factory object to use to start TLS.
@ivar extra: An extra argument to pass to the transport's C{startTLS}
method.
"""
def __init__(self, bufferedData, context, extra):
self.bufferedData = bufferedData
self.context = context
self.extra = extra
def _getTLSClass(klass, _existing={}):
    """Return a TLS-capable variant of C{klass}.

    The variant mixes L{_TLSMixin} into C{klass}; exactly one such class
    is created per distinct C{klass} and memoized in C{_existing}.
    """
    try:
        return _existing[klass]
    except KeyError:
        class TLSConnection(_TLSMixin, klass):
            implements(interfaces.ISSLTransport)
        _existing[klass] = TLSConnection
        return TLSConnection
class Connection(abstract.FileDescriptor, _SocketCloser):
    """
    Superclass of all socket-based FileDescriptors.

    This is an abstract superclass of all objects which represent a TCP/IP
    connection based socket.

    @ivar logstr: prefix used when logging events related to this connection.
    @type logstr: C{str}
    """
    implements(interfaces.ITCPTransport, interfaces.ISystemHandle)

    # Becomes 1 (and the instance's class is swapped to a _TLSMixin
    # subclass) once startTLS has run.
    TLS = 0

    def __init__(self, skt, protocol, reactor=None):
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        self.socket = skt
        self.socket.setblocking(0)
        self.fileno = skt.fileno
        self.protocol = protocol

    if SSL:
        # TLS-related API is only available when pyOpenSSL was imported
        # successfully (SSL is None otherwise).

        # Holds a _TLSDelayed while TLS startup waits for pre-TLS bytes
        # to drain.
        _tlsWaiting = None
        def startTLS(self, ctx, extra):
            """Switch this transport to TLS.  Returns True if TLS was
            started immediately, False if it was deferred until the
            current output buffer drains."""
            assert not self.TLS
            if self.dataBuffer or self._tempDataBuffer:
                # pre-TLS bytes are still being written.  Starting TLS now
                # will do the wrong thing.  Instead, mark that we're trying
                # to go into the TLS state.
                self._tlsWaiting = _TLSDelayed([], ctx, extra)
                return False

            self.stopReading()
            self.stopWriting()
            self._startTLS()
            self.socket = SSL.Connection(ctx.getContext(), self.socket)
            self.fileno = self.socket.fileno
            self.startReading()
            return True

        def _startTLS(self):
            # Swap in the TLS-aware variant of our current class.
            self.TLS = 1
            self.__class__ = _getTLSClass(self.__class__)

        def write(self, bytes):
            # While TLS startup is pending, buffer writes in the
            # _TLSDelayed record instead of the transport.
            if self._tlsWaiting is not None:
                self._tlsWaiting.bufferedData.append(bytes)
            else:
                abstract.FileDescriptor.write(self, bytes)

        def writeSequence(self, iovec):
            if self._tlsWaiting is not None:
                self._tlsWaiting.bufferedData.extend(iovec)
            else:
                abstract.FileDescriptor.writeSequence(self, iovec)

        def doWrite(self):
            result = abstract.FileDescriptor.doWrite(self)
            if self._tlsWaiting is not None:
                if not self.dataBuffer and not self._tempDataBuffer:
                    # Pre-TLS output has drained: really start TLS now
                    # and flush everything buffered in the meantime.
                    waiting = self._tlsWaiting
                    self._tlsWaiting = None
                    self.startTLS(waiting.context, waiting.extra)
                    self.writeSequence(waiting.bufferedData)
            return result

    def getHandle(self):
        """Return the socket for this connection."""
        return self.socket

    def doRead(self):
        """Calls self.protocol.dataReceived with all available data.

        This reads up to self.bufferSize bytes of data from its socket, then
        calls self.dataReceived(data) to process it.  If the connection is not
        lost through an error in the physical recv(), this function will return
        the result of the dataReceived call.
        """
        try:
            data = self.socket.recv(self.bufferSize)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                return
            else:
                return main.CONNECTION_LOST
        if not data:
            # Zero-byte read: the peer closed the connection cleanly.
            return main.CONNECTION_DONE
        return self.protocol.dataReceived(data)

    def writeSomeData(self, data):
        """Connection.writeSomeData(data) -> #of bytes written | CONNECTION_LOST
        This writes as much data as possible to the socket and returns either
        the number of bytes read (which is positive) or a connection error code
        (which is negative)
        """
        try:
            # Limit length of buffer to try to send, because some OSes are too
            # stupid to do so themselves (ahem windows)
            return self.socket.send(buffer(data, 0, self.SEND_LIMIT))
        except socket.error, se:
            if se.args[0] == EINTR:
                # Interrupted by a signal; retry immediately.
                return self.writeSomeData(data)
            elif se.args[0] in (EWOULDBLOCK, ENOBUFS):
                return 0
            else:
                return main.CONNECTION_LOST

    def _closeWriteConnection(self):
        """Half-close the write side and notify a half-close-aware
        protocol, if the protocol supports it."""
        try:
            getattr(self.socket, self._socketShutdownMethod)(1)
        except socket.error:
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        """The peer half-closed; notify a half-close-aware protocol, or
        drop the whole connection if the protocol isn't one."""
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """See abstract.FileDescriptor.connectionLost().
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._closeSocket()
        protocol = self.protocol
        # Break reference cycles and invalidate this transport before
        # informing the protocol.
        del self.protocol
        del self.socket
        del self.fileno
        protocol.connectionLost(reason)

    logstr = "Uninitialized"

    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread.
        """
        return self.logstr

    def getTcpNoDelay(self):
        """Return the current TCP_NODELAY setting as a boolean-ish int."""
        return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        """Enable/disable Nagle's algorithm via TCP_NODELAY."""
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        """Return the current SO_KEEPALIVE setting as a boolean-ish int."""
        return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        """Enable/disable TCP keepalive probes via SO_KEEPALIVE."""
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
# pyOpenSSL is optional: only advertise the TLS-capable transport
# interface on Connection when it is actually available (SSL is None
# when the OpenSSL import at the top of this module failed).
if SSL:
    classImplements(Connection, interfaces.ITLSTransport)
class BaseClient(Connection):
    """A base class for client TCP (and similiar) sockets.
    """
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    def _finishInit(self, whenDone, skt, error, reactor):
        """Called by base classes to continue to next stage of initialization."""
        if whenDone:
            Connection.__init__(self, skt, None, reactor)
            # Until the connection attempt completes, both readable and
            # writable events mean "continue connecting".
            self.doWrite = self.doConnect
            self.doRead = self.doConnect
            reactor.callLater(0, whenDone)
        else:
            reactor.callLater(0, self.failIfNotConnected, error)

    def startTLS(self, ctx, client=1):
        """Start TLS, acting as the TLS client side by default."""
        if Connection.startTLS(self, ctx, client):
            if client:
                self.socket.set_connect_state()
            else:
                self.socket.set_accept_state()

    def stopConnecting(self):
        """Stop attempt to connect."""
        self.failIfNotConnected(error.UserError())

    def failIfNotConnected(self, err):
        """
        Generic method called when the attemps to connect failed. It basically
        cleans everything it can: call connectionFailed, stop read and write,
        delete socket related members.
        """
        if (self.connected or self.disconnected or
            not hasattr(self, "connector")):
            # Already succeeded, already torn down, or already reported.
            return

        self.connector.connectionFailed(failure.Failure(err))
        if hasattr(self, "reactor"):
            # this doesn't happen if we failed in __init__
            self.stopReading()
            self.stopWriting()
            del self.connector

        try:
            self._closeSocket()
        except AttributeError:
            pass
        else:
            del self.socket, self.fileno

    def createInternetSocket(self):
        """(internal) Create a non-blocking socket using
        self.addressFamily, self.socketType.
        """
        s = socket.socket(self.addressFamily, self.socketType)
        s.setblocking(0)
        if fcntl and hasattr(fcntl, 'FD_CLOEXEC'):
            # Don't leak the descriptor into spawned child processes.
            old = fcntl.fcntl(s.fileno(), fcntl.F_GETFD)
            fcntl.fcntl(s.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
        return s

    def resolveAddress(self):
        """Resolve the configured hostname (if it isn't already an IP
        address), then continue the connection attempt."""
        if abstract.isIPAddress(self.addr[0]):
            self._setRealAddress(self.addr[0])
        else:
            d = self.reactor.resolve(self.addr[0])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)

    def _setRealAddress(self, address):
        # Record the resolved (ip, port) pair and start connecting.
        self.realAddress = (address, self.addr[1])
        self.doConnect()

    def doConnect(self):
        """I connect the socket.

        Then, call the protocol's makeConnection, and start waiting for data.
        """
        if not hasattr(self, "connector"):
            # this happens when connection failed but doConnect
            # was scheduled via a callLater in self._finishInit
            return

        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            self.failIfNotConnected(error.getConnectError((err, strerror(err))))
            return

        # doConnect gets called twice.  The first time we actually need to
        # start the connection attempt.  The second time we don't really
        # want to (SO_ERROR above will have taken care of any errors, and if
        # it reported none, the mere fact that doConnect was called again is
        # sufficient to indicate that the connection has succeeded), but it
        # is not /particularly/ detrimental to do so.  This should get
        # cleaned up some day, though.
        try:
            connectResult = self.socket.connect_ex(self.realAddress)
        except socket.error, se:
            connectResult = se.args[0]
        if connectResult:
            if connectResult == EISCONN:
                pass
            # on Windows EINVAL means sometimes that we should keep trying:
            # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
            elif ((connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or
                  (connectResult == EINVAL and platformType == "win32")):
                self.startReading()
                self.startWriting()
                return
            else:
                self.failIfNotConnected(error.getConnectError((connectResult, strerror(connectResult))))
                return

        # If I have reached this point without raising or returning, that means
        # that the socket is connected.
        del self.doWrite
        del self.doRead
        # we first stop and then start, to reset any references to the old doRead
        self.stopReading()
        self.stopWriting()
        self._connectDone()

    def _connectDone(self):
        """The TCP handshake succeeded: build the protocol, mark the
        transport connected and hand it to the protocol."""
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.connected = 1
        self.logstr = self.protocol.__class__.__name__ + ",client"
        self.startReading()
        self.protocol.makeConnection(self)

    def connectionLost(self, reason):
        """Report either a failed connection attempt or the loss of an
        established connection, depending on how far we got."""
        if not self.connected:
            self.failIfNotConnected(error.ConnectError(string=reason))
        else:
            Connection.connectionLost(self, reason)
            self.connector.connectionLost(reason)
class Client(BaseClient):
    """A TCP client."""

    def __init__(self, host, port, bindAddress, connector, reactor=None):
        # BaseClient.__init__ is invoked later
        self.connector = connector
        self.addr = (host, port)

        whenDone = self.resolveAddress
        err = None
        skt = None

        try:
            skt = self.createInternetSocket()
        except socket.error, se:
            err = error.ConnectBindError(se[0], se[1])
            whenDone = None
        if whenDone and bindAddress is not None:
            try:
                # Bind to the caller-requested local address before
                # connecting.
                skt.bind(bindAddress)
            except socket.error, se:
                err = error.ConnectBindError(se[0], se[1])
                whenDone = None
        # Continue (or fail) asynchronously via BaseClient.
        self._finishInit(whenDone, skt, err, reactor)

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the address from which I am connecting.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))

    def getPeer(self):
        """Returns an IPv4Address.

        This indicates the address that I am connected to.
        """
        return address.IPv4Address('TCP', *(self.realAddress + ('INET',)))

    def __repr__(self):
        s = '<%s to %s at %x>' % (self.__class__, self.addr, unsignedID(self))
        return s
class Server(Connection):
    """
    Serverside socket-stream connection class.

    A transport for a socket produced by an accept() on a listening
    server port.
    """

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize it with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        self.server = server
        self.client = client
        self.sessionno = sessionno
        self.hostname = client[0]
        protocolName = self.protocol.__class__.__name__
        self.logstr = "%s,%s,%s" % (protocolName, sessionno, self.hostname)
        self.repstr = "<%s #%s on %s>" % (protocolName, self.sessionno,
                                          self.server._realPortNumber)
        self.startReading()
        self.connected = 1

    def __repr__(self):
        """A string representation of this connection.
        """
        return self.repstr

    def startTLS(self, ctx, server=1):
        """Start TLS, acting as the TLS server side by default."""
        if not Connection.startTLS(self, ctx, server):
            return
        if server:
            self.socket.set_accept_state()
        else:
            self.socket.set_connect_state()

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the server's address.
        """
        sockname = self.socket.getsockname()
        return address.IPv4Address('TCP', *(sockname + ('INET',)))

    def getPeer(self):
        """Returns an IPv4Address.

        This indicates the client's address.
        """
        peername = self.client
        return address.IPv4Address('TCP', *(peername + ('INET',)))
class Port(base.BasePort, _SocketCloser):
    """
    A TCP server port, listening for connections.

    When a connection is accepted, this will call a factory's buildProtocol
    with the incoming address as an argument, according to the specification
    described in L{twisted.internet.interfaces.IProtocolFactory}.

    If you wish to change the sort of transport that will be used, the
    C{transport} attribute will be called with the signature expected for
    C{Server.__init__}, so it can be replaced.

    @ivar deferred: a deferred created when L{stopListening} is called, and
        that will fire when connection is lost. This is not to be used it
        directly: prefer the deferred returned by L{stopListening} instead.
    @type deferred: L{defer.Deferred}

    @ivar disconnecting: flag indicating that the L{stopListening} method has
        been called and that no connections should be accepted anymore.
    @type disconnecting: C{bool}

    @ivar connected: flag set once the listen has successfully been called on
        the socket.
    @type connected: C{bool}
    """
    implements(interfaces.IListeningPort)

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    # Transport class instantiated for each accepted connection.
    transport = Server
    sessionno = 0
    interface = ''
    backlog = 50

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber = None

    def __init__(self, port, factory, backlog=50, interface='', reactor=None):
        """Initialize with a numeric port to listen on.
        """
        base.BasePort.__init__(self, reactor=reactor)
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface

    def __repr__(self):
        if self._realPortNumber is not None:
            return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
                                         self._realPortNumber)
        else:
            return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)

    def createInternetSocket(self):
        """Create the listening socket, enabling address reuse on POSIX
        so rebinding after a restart doesn't fail with EADDRINUSE."""
        s = base.BasePort.createInternetSocket(self)
        if platformType == "posix" and sys.platform != "cygwin":
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s

    def startListening(self):
        """Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        try:
            skt = self.createInternetSocket()
            skt.bind((self.interface, self.port))
        except socket.error, le:
            raise CannotListenError, (self.interface, self.port, le)

        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]

        log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))

        # The order of the next 6 lines is kind of bizarre.  If no one
        # can explain it, perhaps we should re-arrange them.
        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = True
        self.socket = skt
        self.fileno = self.socket.fileno
        self.numberAccepts = 100

        self.startReading()

    def _buildAddr(self, (host, port)):
        # Wrap the raw accept() address tuple in an address object.
        return address._ServerFactoryIPv4Address('TCP', host, port)

    def doRead(self):
        """Called when my socket is ready for reading.

        This accepts a connection and calls self.protocol() to handle the
        wire-level protocol.
        """
        try:
            if platformType == "posix":
                numAccepts = self.numberAccepts
            else:
                # win32 event loop breaks if we do more than one accept()
                # in an iteration of the event loop.
                numAccepts = 1
            for i in range(numAccepts):
                # we need this so we can deal with a factory's buildProtocol
                # calling our loseConnection
                if self.disconnecting:
                    return

                try:
                    skt, addr = self.socket.accept()
                except socket.error, e:
                    if e.args[0] in (EWOULDBLOCK, EAGAIN):
                        # No more pending connections; remember how many
                        # we managed so the next pass adapts.
                        self.numberAccepts = i
                        break
                    elif e.args[0] == EPERM:
                        # Netfilter on Linux may have rejected the
                        # connection, but we get told to try to accept()
                        # anyway.
                        continue
                    elif e.args[0] in (EMFILE, ENOBUFS, ENFILE, ENOMEM, ECONNABORTED):

                        # Linux gives EMFILE when a process is not allowed
                        # to allocate any more file descriptors.  *BSD and
                        # Win32 give (WSA)ENOBUFS.  Linux can also give
                        # ENFILE if the system is out of inodes, or ENOMEM
                        # if there is insufficient memory to allocate a new
                        # dentry.  ECONNABORTED is documented as possible on
                        # both Linux and Windows, but it is not clear
                        # whether there are actually any circumstances under
                        # which it can happen (one might expect it to be
                        # possible if a client sends a FIN or RST after the
                        # server sends a SYN|ACK but before application code
                        # calls accept(2), however at least on Linux this
                        # _seems_ to be short-circuited by syncookies.

                        log.msg("Could not accept new connection (%s)" % (
                            errorcode[e.args[0]],))
                        break
                    raise

                protocol = self.factory.buildProtocol(self._buildAddr(addr))
                if protocol is None:
                    # Factory refused the connection.
                    skt.close()
                    continue
                s = self.sessionno
                self.sessionno = s+1
                transport = self.transport(skt, protocol, addr, self, s, self.reactor)
                transport = self._preMakeConnection(transport)
                protocol.makeConnection(transport)
            else:
                # We drained our whole quota without hitting EWOULDBLOCK;
                # allow more accepts per pass next time.
                self.numberAccepts = self.numberAccepts+20
        except:
            # Note that in TLS mode, this will possibly catch SSL.Errors
            # raised by self.socket.accept()
            #
            # There is no "except SSL.Error:" above because SSL may be
            # None if there is no SSL support.  In any case, all the
            # "except SSL.Error:" suite would probably do is log.deferr()
            # and return, so handling it here works just as well.
            log.deferr()

    def _preMakeConnection(self, transport):
        # Hook for subclasses to wrap/replace the transport before the
        # protocol sees it.
        return transport

    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.

        This will shut down the socket and call self.connectionLost().  It
        returns a deferred which will fire successfully when the port is
        actually closed.
        """
        self.disconnecting = True
        self.stopReading()
        if self.connected:
            self.deferred = defer.Deferred()
            self.reactor.callLater(0, self.connectionLost, connDone)
        return self.deferred

    stopListening = loseConnection

    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        log.msg('(Port %s Closed)' % self._realPortNumber)
        self._realPortNumber = None
        d = None
        if hasattr(self, "deferred"):
            d = self.deferred
            del self.deferred

        base.BasePort.connectionLost(self, reason)
        self.connected = False
        self._closeSocket()
        del self.socket
        del self.fileno

        try:
            self.factory.doStop()
        except:
            self.disconnecting = False
            if d is not None:
                d.errback(failure.Failure())
            else:
                raise
        else:
            self.disconnecting = False
            if d is not None:
                d.callback(None)

    def logPrefix(self):
        """Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the server's address.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
class Connector(base.BaseConnector):
    """TCP implementation of L{base.BaseConnector}; builds L{Client}
    transports for outgoing connections."""

    def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
        self.host = host
        if isinstance(port, types.StringTypes):
            # Accept a service name (e.g. "http") as well as a number.
            try:
                port = socket.getservbyname(port, 'tcp')
            except socket.error, e:
                raise error.ServiceNameUnknownError(string="%s (%r)" % (e, port))
        self.port = port
        self.bindAddress = bindAddress
        base.BaseConnector.__init__(self, factory, timeout, reactor)

    def _makeTransport(self):
        """Create the L{Client} for this connection attempt."""
        return Client(self.host, self.port, self.bindAddress, self, self.reactor)

    def getDestination(self):
        """Return the L{address.IPv4Address} being connected to."""
        return address.IPv4Address('TCP', self.host, self.port, 'INET')
| {
"content_hash": "e13f0c80ba7d9633eb5e34043103e3e1",
"timestamp": "",
"source": "github",
"line_count": 1029,
"max_line_length": 108,
"avg_line_length": 36.2798833819242,
"alnum_prop": 0.6018161362905818,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "40c578a3f49330ad1a9de73e150c78f96067bfef",
"size": "37463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/internet/tcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
} |
from .build import PROPOSAL_GENERATOR_REGISTRY, build_proposal_generator
from .rpn import RPN_HEAD_REGISTRY, build_rpn_head, RPN, StandardRPNHead
__all__ = list(globals().keys())
| {
"content_hash": "93e8c8a82bbd3bb439a0e3d4caa82cc9",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 72,
"avg_line_length": 45,
"alnum_prop": 0.7666666666666667,
"repo_name": "facebookresearch/detectron2",
"id": "3f4e4df7645c67b7a013295207b98fe70b2e574c",
"size": "231",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "detectron2/modeling/proposal_generator/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "79417"
},
{
"name": "CMake",
"bytes": "616"
},
{
"name": "Cuda",
"bytes": "112955"
},
{
"name": "Dockerfile",
"bytes": "3209"
},
{
"name": "Python",
"bytes": "3261609"
},
{
"name": "Shell",
"bytes": "14448"
}
],
"symlink_target": ""
} |
import datetime
import json
import logging
import os
from google.appengine.ext.deferred import deferred
from google.appengine.ext.webapp import template
from rogerthat.bizz import channel
from rogerthat.bizz.rtemail import generate_auto_login_url
from rogerthat.consts import NEWS_MATCHING_QUEUE
from rogerthat.dal.profile import get_service_profile, get_service_visible
from rogerthat.dal.service import get_service_identities, count_users_connected_to_service_identity
from rogerthat.migrations.migrate_news_items import migrate_service
from rogerthat.models.news import NewsSettingsService, NewsItem, NewsGroup, \
NewsSettingsServiceGroup
from rogerthat.pages.admin.news import NewsAdminHandler
from rogerthat.rpc import users
from rogerthat.rpc.models import ServiceLog
def get_last_activity(service_user):
    """Return the UTC datetime of the newest ServiceLog entry for
    *service_user*, or None when no activity has ever been logged."""
    newest_entry = ServiceLog.all().filter('user', service_user).order('-timestamp').get()
    if newest_entry is None:
        return None
    # ServiceLog timestamps are stored in milliseconds since the epoch.
    return datetime.datetime.utcfromtimestamp(newest_entry.timestamp / 1000)
def get_days_between_last_activity(d1):
    """Return the number of whole days elapsed between *d1* and now (UTC)."""
    elapsed = datetime.datetime.utcnow() - d1
    return elapsed.days
class SetupNewsServiceHandler(NewsAdminHandler):
    """Admin page for working through services whose news setup is pending.

    GET renders the next pending service for a community/setup-needed id;
    POST applies an action ('delete', 'skip' or 'save_group') to one service
    and responds with JSON.
    """
    def get(self):
        # Render the first service that still needs news setup for the
        # requested community and setup-needed id (sni).
        community_id = self.request.get("community_id", None)
        sni = self.request.get("sni", None)
        if not community_id or not sni:
            self.redirect('/mobiadmin/google/news')
            return
        if community_id:
            community_id = long(community_id)  # Python 2 long()
        sni = int(sni)
        qry = NewsSettingsService.list_setup_needed(community_id, sni)
        item = qry.get()
        if not item:
            # Nothing left to set up -> back to the overview page.
            self.redirect('/mobiadmin/google/news?community_id=%s' % community_id)
            return
        sp = get_service_profile(item.service_user)
        last_activity = get_last_activity(item.service_user)
        latest_activity_days = get_days_between_last_activity(last_activity) if last_activity else -1
        disabled_reason = None
        if sp.solution and sp.solution == u'flex':
            # Best effort: the shop module may not be available everywhere.
            try:
                from shop.models import Customer
                c = Customer.get_by_service_email(item.service_user.email())
                if c and c.service_disabled_at:
                    disabled_reason = c.disabled_reason
            except:
                pass
        identities = []
        total_user_count = 0
        all_hidden = True
        for si in get_service_identities(item.service_user):
            news_count = NewsItem.query().filter(NewsItem.sender == si.user).count(None)
            user_count = count_users_connected_to_service_identity(si.user)
            total_user_count += user_count
            service_visible = get_service_visible(si.user)
            if service_visible and all_hidden:
                all_hidden = False
            identities.append(dict(id=si.identifier,
                                   name=si.name,
                                   news_count=news_count,
                                   user_count=user_count,
                                   search_enabled=service_visible))
        # Offer 'delete' only for services that look abandoned: explicitly
        # disabled, hidden with no users, or hidden + stale with few users.
        delete_enabled = False
        if disabled_reason:
            delete_enabled = True
        elif total_user_count == 0 and all_hidden:
            delete_enabled = True
        elif latest_activity_days > 300 and total_user_count < 20 and all_hidden:
            delete_enabled = True
        context = dict(sni=sni,
                       count=qry.count(None),
                       item=item,
                       sp=sp,
                       auto_login_url=generate_auto_login_url(item.service_user),
                       latest_activity=dict(date=str(last_activity) if last_activity else 'never', days=latest_activity_days),
                       delete_enabled=delete_enabled,
                       disabled_reason=disabled_reason,
                       identities=identities)
        path = os.path.join(os.path.dirname(__file__), 'services_detail.html')
        channel.append_firebase_params(context)
        self.response.out.write(template.render(path, context))
    def post(self):
        # Apply an admin action to one service; responds with JSON.
        logging.debug(self.request.POST)
        data = self.request.get("data", None)
        if not data:
            self.redirect('/mobiadmin/google/news')
            return
        self.response.headers['Content-Type'] = 'text/json'
        data = json.loads(data)
        service_user_email = data.get("service_user_email", None)
        action = data.get("action", None)
        nss = NewsSettingsService.create_key(users.User(service_user_email)).get()  # type: NewsSettingsService
        if action == 'delete':
            # Safety checks: only delete disabled, user-less or long-inactive services.
            should_delete = False
            try:
                from shop.models import Customer
                c = Customer.get_by_service_email(service_user_email)
                if c and c.service_disabled_at:
                    should_delete = True
            except:
                pass
            total_user_count = 0
            for si in get_service_identities(nss.service_user):
                user_count = count_users_connected_to_service_identity(si.user)
                total_user_count += user_count
            if total_user_count == 0:
                should_delete = True
            last_activity = get_last_activity(nss.service_user)
            if not should_delete and not last_activity:
                self.response.out.write(json.dumps({'success': False,
                                                    'errormsg': 'Delete failed could not find last activity'}))
                return
            latest_activity_days = get_days_between_last_activity(last_activity) if last_activity else -1
            if not should_delete and latest_activity_days <= 300:
                self.response.out.write(json.dumps({'success': False,
                                                    'errormsg': 'Service was active in the last 300 days'}))
                return
            # 998 marks the service as handled via deletion.
            nss.setup_needed_id = 998
            nss.put()
            service_profile = get_service_profile(nss.service_user, False)
            if service_profile.solution:
                from solutions.common.bizz.jobs import delete_solution
                delete_solution(nss.service_user, True)
            else:
                from rogerthat.bizz.job import delete_service
                delete_service.job(nss.service_user, nss.service_user)
        elif action == 'skip':
            # 999 marks the service as skipped by an admin.
            nss.setup_needed_id = 999
            nss.put()
        elif action == 'save_group':
            groups = data.get("groups", None)
            if not groups:
                self.response.out.write(json.dumps({'success': False,
                                                    'errormsg': 'This is awkward... (groups not found)'}))
                return
            group_types = [ng.group_type for ng in NewsGroup.list_by_community_id(nss.community_id)]
            # 0 means the setup is complete.
            nss.setup_needed_id = 0
            nss.groups = []
            if groups == 'city':
                if NewsGroup.TYPE_CITY in group_types:
                    nss.groups.append(NewsSettingsServiceGroup(group_type=NewsGroup.TYPE_CITY))
                if NewsGroup.TYPE_TRAFFIC in group_types:
                    nss.groups.append(NewsSettingsServiceGroup(group_type=NewsGroup.TYPE_TRAFFIC))
                if NewsGroup.TYPE_EVENTS in group_types:
                    nss.groups.append(NewsSettingsServiceGroup(group_type=NewsGroup.TYPE_EVENTS))
            elif groups == 'other':
                if NewsGroup.TYPE_PROMOTIONS in group_types:
                    nss.groups.append(NewsSettingsServiceGroup(group_type=NewsGroup.TYPE_PROMOTIONS))
                if NewsGroup.TYPE_EVENTS in group_types:
                    nss.groups.append(NewsSettingsServiceGroup(group_type=NewsGroup.TYPE_EVENTS))
            else:
                self.response.out.write(json.dumps({'success': False,
                                                    'errormsg': 'This is awkward... (group not found)'}))
                return
            if not nss.groups:
                logging.debug(group_types)
                self.response.out.write(json.dumps({'success': False,
                                                    'errormsg': 'This is awkward... (no group matches)'}))
                return
            nss.put()
            # Re-run news matching for this service in the background.
            deferred.defer(migrate_service, nss.service_user, dry_run=False, force=True, _countdown=5, _queue=NEWS_MATCHING_QUEUE)
        self.response.out.write(json.dumps({'success': True}))
class ListNewsServiceHandler(NewsAdminHandler):
    """Admin page listing every service that still needs news setup."""
    def get(self):
        community_id = self.request.get("community_id", None)
        sni = self.request.get("sni", None)
        if not community_id or not sni:
            self.redirect('/mobiadmin/google/news')
            return
        if community_id:
            community_id = long(community_id)  # Python 2 long()
        sni = int(sni)
        qry = NewsSettingsService.list_setup_needed(community_id, sni)
        items = []
        for nss in qry:
            last_activity = get_last_activity(nss.service_user)
            # -1 signals "never active" to the template.
            latest_activity_days = get_days_between_last_activity(last_activity) if last_activity else -1
            identities = []
            for si in get_service_identities(nss.service_user):
                news_count = NewsItem.query().filter(NewsItem.sender == si.user).count(None)
                service_visible = get_service_visible(si.user)
                identities.append(dict(id=si.identifier,
                                       name=si.name,
                                       news_count=news_count,
                                       search_enabled=service_visible))
            items.append(dict(service_user_email=nss.service_user.email(),
                              latest_activity=dict(date=str(last_activity) if last_activity else 'never', days=latest_activity_days),
                              identities=identities))
        context = dict(items=items)
        path = os.path.join(os.path.dirname(__file__), 'services_list.html')
        channel.append_firebase_params(context)
        self.response.out.write(template.render(path, context))
| {
"content_hash": "5a3e0724a7057bcdb1f516540740671a",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 133,
"avg_line_length": 43.037974683544306,
"alnum_prop": 0.5790196078431372,
"repo_name": "our-city-app/oca-backend",
"id": "a753d36a4c79442e23e3035f9665382180dc80b4",
"size": "10840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/pages/admin/news/services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
from flask import (
Blueprint,
current_app,
jsonify,
request
)
from flask_jwt_extended import jwt_required
from app.dao.fees_dao import dao_create_fee, dao_get_fees, dao_update_fee, dao_get_fee_by_id
from app.errors import register_errors
from app.routes.fees.schemas import post_create_fee_schema, post_update_fee_schema
from app.models import Fee
from app.schema_validation import validate
# Two blueprints: collection routes (/fees) and single-item routes (/fee/...).
fees_blueprint = Blueprint('fees', __name__)
fee_blueprint = Blueprint('fee', __name__)
# Attach the shared error handlers to both blueprints.
register_errors(fees_blueprint)
register_errors(fee_blueprint)
@fees_blueprint.route('/fees')
@jwt_required
def get_fees():
    """Return every fee as a JSON list (null entries for missing fees)."""
    current_app.logger.info('get_fees')
    serialized = []
    for fee in dao_get_fees():
        serialized.append(fee.serialize() if fee else None)
    return jsonify(serialized)
@fee_blueprint.route('/fee/<uuid:fee_id>', methods=['GET'])
@jwt_required
def get_fee_by_id(fee_id):
    """Look up a single fee by its UUID and return it as JSON."""
    current_app.logger.info('get_fee: {}'.format(fee_id))
    return jsonify(dao_get_fee_by_id(fee_id).serialize())
@fee_blueprint.route('/fee', methods=['POST'])
@jwt_required
def create_fee():
    """Validate the posted payload, persist a new Fee and return it (201)."""
    payload = request.get_json()
    validate(payload, post_create_fee_schema)
    new_fee = Fee(**payload)
    dao_create_fee(new_fee)
    return jsonify(new_fee.serialize()), 201
@fee_blueprint.route('/fee/<uuid:fee_id>', methods=['POST'])
@jwt_required
def update_fee(fee_id):
    """Update an existing fee and return its persisted state (200).

    Validates the JSON payload against post_update_fee_schema, checks the
    fee exists, applies the update, then re-reads the row so the response
    reflects the values actually stored.
    """
    data = request.get_json()
    validate(data, post_update_fee_schema)
    # Ensure the fee exists before attempting the update (presumably
    # dao_get_fee_by_id raises / 404s when it is missing — matches the
    # original fetch-before-update behaviour).
    dao_get_fee_by_id(fee_id)
    dao_update_fee(fee_id, **data)
    # Re-fetch AFTER the update: the original serialized the instance
    # fetched before dao_update_fee, which could return stale, pre-update
    # values in the response.
    updated_fee = dao_get_fee_by_id(fee_id)
    return jsonify(updated_fee.serialize()), 200
| {
"content_hash": "e3c827d3041222007bb34bab085d7b0d",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 92,
"avg_line_length": 24.822580645161292,
"alnum_prop": 0.6900584795321637,
"repo_name": "NewAcropolis/api",
"id": "6b5912d02af13351113f8ab43897142437ef3391",
"size": "1539",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/routes/fees/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "10421"
},
{
"name": "Makefile",
"bytes": "1369"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "791740"
},
{
"name": "Shell",
"bytes": "66108"
}
],
"symlink_target": ""
} |
"""Demonstrate replacing try/except-pass boilerplate with a context manager."""
import os
from contextlib import contextmanager


@contextmanager
def ignored(*exceptions):
    """Context manager that silently swallows the given exception types.

    Equivalent to contextlib.suppress (Python 3.4+); kept here as the
    classic hand-rolled recipe.  Must be defined before it is used below —
    the original used it at module level before its definition, which
    raised NameError.
    """
    try:
        yield
    except exceptions:
        pass


# Traditional form: explicit try/except around the risky call.
try:
    os.remove('somefile.tmp')
except OSError:
    pass

# Same effect, expressed declaratively with the context manager.
with ignored(OSError):
    os.remove('somefile.tmp')
| {
"content_hash": "e6665fbba5aed2bed990bc3d20db7af5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 29,
"avg_line_length": 15.428571428571429,
"alnum_prop": 0.6388888888888888,
"repo_name": "klose911/klose911.github.io",
"id": "d9fd05b0b81e9dfffe41a9b6e3029095abce3d85",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/src/pythonic/decorator/ignore_exception.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6871"
},
{
"name": "CSS",
"bytes": "1874044"
},
{
"name": "Emacs Lisp",
"bytes": "108219"
},
{
"name": "Erlang",
"bytes": "76979"
},
{
"name": "Go",
"bytes": "47318"
},
{
"name": "HTML",
"bytes": "2982404"
},
{
"name": "Java",
"bytes": "183096"
},
{
"name": "Kotlin",
"bytes": "123031"
},
{
"name": "Python",
"bytes": "52944"
},
{
"name": "Rust",
"bytes": "75214"
},
{
"name": "Scheme",
"bytes": "305426"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
} |
# Titanic survival exercise: compare DecisionTree vs GaussianNB accuracy
# on the numeric features of the dataset (Python 2 print syntax).
import numpy as np
import pandas as pd
# Load the dataset
X = pd.read_csv('titanic_data.csv')
# Limit to numeric data
X = X._get_numeric_data()
# Separate the labels
y = X['Survived']
# Remove labels from the inputs, and age due to missing data
del X['Age'], X['Survived']
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
# TODO: split the data into training and testing sets,
# using the standard settings for train_test_split.
# Then, train and test the classifiers with your newly split data instead of X and y.
# The decision tree classifier
clf1 = DecisionTreeClassifier()
clf1.fit(X,y)
# NOTE(review): accuracy is currently measured on the *training* data, so it
# is optimistic; the TODO above (train/test split) is what fixes that.
print "Decision Tree has accuracy: ",accuracy_score(clf1.predict(X),y)
# The naive Bayes classifier
clf2 = GaussianNB()
clf2.fit(X,y)
print "GaussianNB has accuracy: ",accuracy_score(clf2.predict(X),y)
# Placeholder scores, to be filled in once the split is implemented.
answer = {
  "Naive Bayes Score": 0,
  "Decision Tree Score": 0
}
| {
"content_hash": "b88001bf4cb877b971e926a0d7e526f9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 85,
"avg_line_length": 27.91176470588235,
"alnum_prop": 0.7492096944151738,
"repo_name": "harish-garg/Machine-Learning",
"id": "a3fe35668e315a14c2508e3399502ac62af2ff8e",
"size": "1541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "udacity/evaluation_metrics/evaluate_accuracy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "456078"
},
{
"name": "HTML",
"bytes": "573730"
},
{
"name": "Jupyter Notebook",
"bytes": "496068"
},
{
"name": "Python",
"bytes": "87423"
}
],
"symlink_target": ""
} |
"""
Dumps master config as JSON.
Uses master_cfg_utils.LoadConfig, which should be called at most once
in the same process. That's why this is a separate utility.
"""
import argparse
import json
import os
import sys
# Make the scripts/ directory importable regardless of the working directory.
SCRIPTS_DIR = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir))
if not SCRIPTS_DIR in sys.path:
  sys.path.insert(0, SCRIPTS_DIR)
# env.Install() must run before the master.* imports below are resolved.
from common import env
env.Install()
from common import master_cfg_utils
from master.factory.build_factory import BuildFactory
class BuildbotJSONEncoder(json.JSONEncoder):
  """JSON encoder that knows how to render buildbot objects."""

  def default(self, obj):  # pylint: disable=E0202
    """Serialize BuildFactory as a dict; fall back to repr() for the rest."""
    if not isinstance(obj, BuildFactory):
      return repr(obj)
    return {'repr': repr(obj), 'properties': obj.properties.asDict()}
def main(argv):
  """Load the given master's config and dump BuildmasterConfig as JSON."""
  parser = argparse.ArgumentParser()
  parser.add_argument('master')
  parser.add_argument('output', type=argparse.FileType('w'), default=sys.stdout)
  options = parser.parse_args(argv)
  loaded = master_cfg_utils.LoadConfig(options.master)
  json.dump(loaded['BuildmasterConfig'], options.output,
            cls=BuildbotJSONEncoder, indent=4)
  return 0
# Script entry point: forward CLI args (minus argv[0]) and exit with the status.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| {
"content_hash": "cdefc607241fff1e7a20f3e88053c71d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 80,
"avg_line_length": 23.66,
"alnum_prop": 0.7016060862214708,
"repo_name": "eunchong/build",
"id": "54d4420087da971b8658f0223edf12aceedae1f2",
"size": "1368",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/tools/dump_master_cfg.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3128"
},
{
"name": "CSS",
"bytes": "211818"
},
{
"name": "HTML",
"bytes": "429981"
},
{
"name": "JavaScript",
"bytes": "75624"
},
{
"name": "Makefile",
"bytes": "21204"
},
{
"name": "Python",
"bytes": "6143109"
},
{
"name": "Shell",
"bytes": "23512"
}
],
"symlink_target": ""
} |
from typing import List, Tuple
import os
import numpy as np
from pymorphy2 import MorphAnalyzer
from russian_tagsets import converters
from keras.layers import Input, Embedding, Dense, LSTM, BatchNormalization, Activation, \
concatenate, Bidirectional, TimeDistributed, Dropout
from keras.models import Model, model_from_json
try:
from keras.optimizers import Adam
except:
from keras.optimizer_v2.adam import Adam
from keras import backend as K
from rnnmorph.batch_generator import BatchGenerator
from rnnmorph.data_preparation.grammeme_vectorizer import GrammemeVectorizer
from rnnmorph.data_preparation.word_vocabulary import WordVocabulary
from rnnmorph.data_preparation.loader import Loader
from rnnmorph.char_embeddings_model import build_dense_chars_layer, get_char_model
from rnnmorph.config import BuildModelConfig, TrainConfig
class ReversedLSTM(LSTM):
    """LSTM that processes the sequence right-to-left.

    `go_backwards=True` makes Keras feed the input in reverse order; the
    output is then flipped back (K.reverse on axis 1, the time axis) so it
    lines up position-by-position with a forward LSTM's output.
    """
    def __init__(self, units, **kwargs):
        kwargs['go_backwards'] = True
        super().__init__(units, **kwargs)
    def call(self, inputs, **kwargs):
        y_rev = super().call(inputs, **kwargs)
        return K.reverse(y_rev, 1)
class LSTMMorphoAnalysis:
    """BiLSTM morphological tagger: data preparation, model building,
    training, evaluation and prediction."""
    def __init__(self, language: str):
        self.language = language  # type: str
        # pymorphy2 analysis and tagset conversion are only used for Russian.
        self.morph = MorphAnalyzer() if language == "ru" else None  # type: MorphAnalyzer
        self.converter = converters.converter('opencorpora-int', 'ud14') if self.language == "ru" else None
        self.grammeme_vectorizer_input = GrammemeVectorizer()  # type: GrammemeVectorizer
        self.grammeme_vectorizer_output = GrammemeVectorizer()  # type: GrammemeVectorizer
        self.word_vocabulary = WordVocabulary()  # type: WordVocabulary
        self.char_set = ""  # type: str
        self.train_model = None  # type: Model
        self.eval_model = None  # type: Model
    def prepare(self, gram_dump_path_input: str, gram_dump_path_output: str,
                word_vocabulary_dump_path: str, char_set_dump_path: str,
                file_names: List[str] = None) -> None:
        """
        Prepare the grammeme vectorizers and the word vocabulary from the
        corpus, reusing previously saved dumps when they exist on disk.
        """
        if os.path.exists(gram_dump_path_input):
            self.grammeme_vectorizer_input.load(gram_dump_path_input)
        if os.path.exists(gram_dump_path_output):
            self.grammeme_vectorizer_output.load(gram_dump_path_output)
        if os.path.exists(word_vocabulary_dump_path):
            self.word_vocabulary.load(word_vocabulary_dump_path)
        if os.path.exists(char_set_dump_path):
            with open(char_set_dump_path, 'r', encoding='utf-8') as f:
                self.char_set = f.read().rstrip()
        if self.grammeme_vectorizer_input.is_empty() or \
                self.grammeme_vectorizer_output.is_empty() or \
                self.word_vocabulary.is_empty() or\
                not self.char_set:
            # (Re)build everything from the raw corpus files and save dumps.
            loader = Loader(self.language)
            loader.parse_corpora(file_names)
            self.grammeme_vectorizer_input = loader.grammeme_vectorizer_input
            self.grammeme_vectorizer_input.save(gram_dump_path_input)
            self.grammeme_vectorizer_output = loader.grammeme_vectorizer_output
            self.grammeme_vectorizer_output.save(gram_dump_path_output)
            self.word_vocabulary = loader.word_vocabulary
            self.word_vocabulary.save(word_vocabulary_dump_path)
            self.char_set = loader.char_set
            with open(char_set_dump_path, 'w', encoding='utf-8') as f:
                f.write(self.char_set)
    def save(self, model_config_path: str, model_weights_path: str,
             eval_model_config_path: str, eval_model_weights_path: str):
        """Serialize train/eval model architectures (JSON) and their weights."""
        if self.eval_model is not None:
            with open(eval_model_config_path, "w", encoding='utf-8') as f:
                f.write(self.eval_model.to_json())
            self.eval_model.save_weights(eval_model_weights_path)
        if self.train_model is not None:
            with open(model_config_path, "w", encoding='utf-8') as f:
                f.write(self.train_model.to_json())
            self.train_model.save_weights(model_weights_path)
    def load_train(self, config: BuildModelConfig, model_config_path: str=None, model_weights_path: str=None):
        """Restore the training model from disk, recompile it, and derive the
        evaluation model (same weights, only the main output)."""
        with open(model_config_path, "r", encoding='utf-8') as f:
            if config.use_crf:
                from keras_contrib.layers import CRF
                custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}
                self.train_model = model_from_json(f.read(), custom_objects=custom_objects)
            else:
                custom_objects = {'ReversedLSTM': ReversedLSTM}
                self.train_model = model_from_json(f.read(), custom_objects=custom_objects)
        self.train_model.load_weights(model_weights_path)
        loss = {}
        metrics = {}
        if config.use_crf:
            out_layer_name = 'crf'
            # The auxiliary LM output layers (if enabled) come after the CRF
            # layer in `layers`, so step back over them to reach the CRF.
            offset = 0
            if config.use_pos_lm:
                offset += 2
            if config.use_word_lm:
                offset += 2
            loss[out_layer_name] = self.train_model.layers[-1-offset].loss_function
            metrics[out_layer_name] = self.train_model.layers[-1-offset].accuracy
        else:
            out_layer_name = 'main_pred'
            loss[out_layer_name] = 'sparse_categorical_crossentropy'
            metrics[out_layer_name] = 'accuracy'
        if config.use_pos_lm:
            prev_layer_name = 'shifted_pred_prev'
            next_layer_name = 'shifted_pred_next'
            loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'
            metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'
        self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)
        self.eval_model = Model(inputs=self.train_model.inputs, outputs=self.train_model.outputs[0])
    def load_eval(self, config: BuildModelConfig, eval_model_config_path: str,
                  eval_model_weights_path: str) -> None:
        """Restore the inference-only model from disk (no compilation needed)."""
        with open(eval_model_config_path, "r", encoding='utf-8') as f:
            if config.use_crf:
                from keras_contrib.layers import CRF
                custom_objects = {'ReversedLSTM': ReversedLSTM, 'CRF': CRF}
                self.eval_model = model_from_json(f.read(), custom_objects=custom_objects)
            else:
                custom_objects = {'ReversedLSTM': ReversedLSTM}
                self.eval_model = model_from_json(f.read(), custom_objects=custom_objects)
        self.eval_model.load_weights(eval_model_weights_path)
    def build(self, config: BuildModelConfig, word_embeddings=None):
        """
        Model definition.
        :param config: model config.
        :param word_embeddings: word-embedding matrix.
        """
        inputs = []
        embeddings = []
        if config.use_word_embeddings and word_embeddings is not None:
            words = Input(shape=(None,), name='words')
            # NOTE(review): `word_embeddings.size.shape` looks suspicious —
            # elsewhere in this method `word_embeddings.shape` is used, and
            # `words` is never appended to `inputs` unlike the other input
            # layers.  Confirm whether use_word_embeddings is ever enabled.
            word_vocabulary_size = word_embeddings.size.shape[0]
            word_embeddings_dim = word_embeddings.size.shape[1]
            words_embedding = Embedding(word_vocabulary_size, word_embeddings_dim, name='word_embeddings')(words)
            embeddings.append(words_embedding)
        if config.use_gram:
            grammemes_input = Input(shape=(None, self.grammeme_vectorizer_input.grammemes_count()), name='grammemes')
            grammemes_embedding = Dropout(config.gram_dropout)(grammemes_input)
            grammemes_embedding = Dense(config.gram_hidden_size, activation='relu')(grammemes_embedding)
            inputs.append(grammemes_input)
            embeddings.append(grammemes_embedding)
        if config.use_chars:
            chars_input = Input(shape=(None, config.char_max_word_length), name='chars')
            char_layer = build_dense_chars_layer(
                max_word_length=config.char_max_word_length,
                char_vocab_size=len(self.char_set)+1,
                char_emb_dim=config.char_embedding_dim,
                hidden_dim=config.char_function_hidden_size,
                output_dim=config.char_function_output_size,
                dropout=config.char_dropout)
            if config.use_trained_char_embeddings:
                char_layer = get_char_model(
                    char_layer=char_layer,
                    max_word_length=config.char_max_word_length,
                    embeddings=word_embeddings,
                    model_config_path=config.char_model_config_path,
                    model_weights_path=config.char_model_weights_path,
                    vocabulary=self.word_vocabulary,
                    char_set=self.char_set)
            chars_embedding = char_layer(chars_input)
            inputs.append(chars_input)
            embeddings.append(chars_embedding)
        if len(embeddings) > 1:
            layer = concatenate(embeddings, name="LSTM_input")
        else:
            layer = embeddings[0]
        # First BiLSTM layer is built from two separate directional LSTMs so
        # their per-direction outputs can feed the POS/word LM heads below.
        lstm_input = Dense(config.rnn_input_size, activation='relu')(layer)
        lstm_forward_1 = LSTM(config.rnn_hidden_size, dropout=config.rnn_dropout,
                              recurrent_dropout=config.rnn_dropout, return_sequences=True,
                              name='LSTM_1_forward')(lstm_input)
        lstm_backward_1 = ReversedLSTM(config.rnn_hidden_size, dropout=config.rnn_dropout,
                                       recurrent_dropout=config.rnn_dropout, return_sequences=True,
                                       name='LSTM_1_backward')(lstm_input)
        layer = concatenate([lstm_forward_1, lstm_backward_1], name="BiLSTM_input")
        for i in range(config.rnn_n_layers-1):
            layer = Bidirectional(LSTM(
                config.rnn_hidden_size,
                dropout=config.rnn_dropout,
                recurrent_dropout=config.rnn_dropout,
                return_sequences=True,
                name='LSTM_'+str(i)))(layer)
        layer = TimeDistributed(Dense(config.dense_size))(layer)
        layer = TimeDistributed(Dropout(config.dense_dropout))(layer)
        layer = TimeDistributed(BatchNormalization())(layer)
        layer = TimeDistributed(Activation('relu'))(layer)
        outputs = []
        loss = {}
        metrics = {}
        # +1 reserves an index for padding / unknown tags.
        num_of_classes = self.grammeme_vectorizer_output.size() + 1
        if config.use_crf:
            from keras_contrib.layers import CRF
            out_layer_name = 'crf'
            crf_layer = CRF(num_of_classes, sparse_target=True, name=out_layer_name)
            outputs.append(crf_layer(layer))
            loss[out_layer_name] = crf_layer.loss_function
            metrics[out_layer_name] = crf_layer.accuracy
        else:
            out_layer_name = 'main_pred'
            outputs.append(Dense(num_of_classes, activation='softmax', name=out_layer_name)(layer))
            loss[out_layer_name] = 'sparse_categorical_crossentropy'
            metrics[out_layer_name] = 'accuracy'
        if config.use_pos_lm:
            # Auxiliary objective: predict the previous/next tag (POS LM).
            prev_layer_name = 'shifted_pred_prev'
            next_layer_name = 'shifted_pred_next'
            prev_layer = Dense(num_of_classes, activation='softmax', name=prev_layer_name)
            next_layer = Dense(num_of_classes, activation='softmax', name=next_layer_name)
            outputs.append(prev_layer(Dense(config.dense_size, activation='relu')(lstm_backward_1)))
            outputs.append(next_layer(Dense(config.dense_size, activation='relu')(lstm_forward_1)))
            loss[prev_layer_name] = loss[next_layer_name] = 'sparse_categorical_crossentropy'
            metrics[prev_layer_name] = metrics[next_layer_name] = 'accuracy'
        if config.use_word_lm:
            # Auxiliary objective: predict neighbouring words with an output
            # layer tied to the (frozen) word-embedding matrix.
            out_layer_name = 'out_embedding'
            out_embedding = Dense(word_embeddings.shape[0],
                                  weights=[word_embeddings.T, np.zeros(word_embeddings.shape[0])],
                                  activation='softmax', name=out_layer_name, trainable=False)
            outputs.append(out_embedding(Dense(word_embeddings.shape[1], activation='relu')(lstm_backward_1)))
            outputs.append(out_embedding(Dense(word_embeddings.shape[1], activation='relu')(lstm_forward_1)))
            loss[out_layer_name] = 'sparse_categorical_crossentropy'
            metrics[out_layer_name] = 'accuracy'
        self.train_model = Model(inputs=inputs, outputs=outputs)
        self.train_model.compile(Adam(clipnorm=5.), loss=loss, metrics=metrics)
        # Evaluation model shares weights but exposes only the main output.
        self.eval_model = Model(inputs=inputs, outputs=outputs[0])
        print(self.train_model.summary())
    def train(self, file_names: List[str], train_config: TrainConfig, build_config: BuildModelConfig) -> None:
        """Train for `epochs_num` full passes over the corpus, periodically
        checkpointing and evaluating on the held-out split after each pass."""
        np.random.seed(train_config.random_seed)
        sample_counter = self.count_samples(file_names)
        train_idx, val_idx = self.get_split(sample_counter, train_config.val_part)
        for big_epoch in range(train_config.epochs_num):
            print('------------Big Epoch {}------------'.format(big_epoch))
            batch_generator = BatchGenerator(
                language=self.language,
                file_names=file_names,
                config=train_config,
                grammeme_vectorizer_input=self.grammeme_vectorizer_input,
                grammeme_vectorizer_output=self.grammeme_vectorizer_output,
                build_config=build_config,
                indices=train_idx,
                word_vocabulary=self.word_vocabulary,
                char_set=self.char_set)
            for epoch, (inputs, target) in enumerate(batch_generator):
                self.train_model.fit(inputs, target, batch_size=train_config.batch_size, epochs=1, verbose=2)
                if epoch != 0 and epoch % train_config.dump_model_freq == 0:
                    self.save(train_config.train_model_config_path, train_config.train_model_weights_path,
                              train_config.eval_model_config_path, train_config.eval_model_weights_path)
            self.evaluate(
                file_names=file_names,
                val_idx=val_idx,
                train_config=train_config,
                build_config=build_config)
    @staticmethod
    def count_samples(file_names: List[str]):
        """
        Count the number of sentences in the data set.
        :param file_names: data files.
        :return: number of sentences.
        """
        sample_counter = 0
        for filename in file_names:
            with open(filename, "r", encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    if len(line) == 0:
                        # Sentences are separated by blank lines.
                        sample_counter += 1
        return sample_counter
    @staticmethod
    def get_split(sample_counter: int, val_part: float) -> Tuple[np.array, np.array]:
        """
        Return the sentence indices forming the train and val sets.
        :param sample_counter: number of sentences.
        :param val_part: fraction of the data that becomes val.
        :return: index arrays for both sets.
        """
        perm = np.random.permutation(sample_counter)
        border = int(sample_counter * (1 - val_part))
        train_idx = perm[:border]
        val_idx = perm[border:]
        return train_idx, val_idx
    def evaluate(self, file_names, val_idx, train_config: TrainConfig, build_config: BuildModelConfig) -> None:
        """
        Evaluation on the val set: prints word- and sentence-level accuracy.
        :param file_names: data files.
        :param val_idx: val indices.
        :param train_config: training config.
        :param build_config: model config.
        """
        word_count = 0
        word_errors = 0
        sentence_count = 0
        sentence_errors = 0
        batch_generator = BatchGenerator(
            language=self.language,
            file_names=file_names,
            config=train_config,
            grammeme_vectorizer_input=self.grammeme_vectorizer_input,
            grammeme_vectorizer_output=self.grammeme_vectorizer_output,
            build_config=build_config,
            indices=val_idx,
            word_vocabulary=self.word_vocabulary,
            char_set=self.char_set)
        for epoch, (inputs, target) in enumerate(batch_generator):
            predicted_y = self.eval_model.predict(inputs, batch_size=train_config.batch_size, verbose=0)
            for i, sentence in enumerate(target[0]):
                sentence_has_errors = False
                # Sentences are left-padded with zeros; skip the padding.
                count_zero = sum([1 for num in sentence if num == [0]])
                real_sentence_tags = sentence[count_zero:]
                answer = []
                for grammeme_probs in predicted_y[i][count_zero:]:
                    num = np.argmax(grammeme_probs)
                    answer.append(num)
                for tag, predicted_tag in zip(real_sentence_tags, answer):
                    tag = tag[0]
                    word_count += 1
                    if tag != predicted_tag:
                        word_errors += 1
                        sentence_has_errors = True
                sentence_count += 1
                if sentence_has_errors:
                    sentence_errors += 1
        print("Word accuracy: ", 1.0 - float(word_errors) / word_count)
        print("Sentence accuracy: ", 1.0 - float(sentence_errors) / sentence_count)
    def predict_probabilities(self, sentences: List[List[str]], batch_size: int,
                              build_config: BuildModelConfig) -> List[List[List[float]]]:
        """
        Predict full PoS tags for sentences, with probabilities of all variants.
        :param sentences: list of sentences (each a list of words).
        :param build_config: model architecture config.
        :param batch_size: batch size.
        :return: tag probabilities.
        """
        max_sentence_len = max([len(sentence) for sentence in sentences])
        if max_sentence_len == 0:
            return [[] for _ in sentences]
        n_samples = len(sentences)
        # NOTE(review): np.int / np.float are removed in NumPy >= 1.24 —
        # confirm the pinned NumPy version or switch to plain int / float.
        words = np.zeros((n_samples, max_sentence_len), dtype=np.int)
        grammemes = np.zeros((n_samples, max_sentence_len, self.grammeme_vectorizer_input.grammemes_count()),
                             dtype=np.float)
        chars = np.zeros((n_samples, max_sentence_len, build_config.char_max_word_length), dtype=np.int)
        for i, sentence in enumerate(sentences):
            if not sentence:
                continue
            word_indices, gram_vectors, char_vectors = BatchGenerator.get_sample(
                sentence,
                language=self.language,
                converter=self.converter,
                morph=self.morph,
                grammeme_vectorizer=self.grammeme_vectorizer_input,
                max_word_len=build_config.char_max_word_length,
                word_vocabulary=self.word_vocabulary,
                word_count=build_config.word_max_count,
                char_set=self.char_set)
            # Left-pad: each sentence is placed at the END of its row.
            words[i, -len(sentence):] = word_indices
            grammemes[i, -len(sentence):] = gram_vectors
            chars[i, -len(sentence):] = char_vectors
        inputs = []
        if build_config.use_word_embeddings:
            inputs.append(words)
        if build_config.use_gram:
            inputs.append(grammemes)
        if build_config.use_chars:
            inputs.append(chars)
        return self.eval_model.predict(inputs, batch_size=batch_size)
| {
"content_hash": "992de1f338743e33f69053cb6786028e",
"timestamp": "",
"source": "github",
"line_count": 404,
"max_line_length": 117,
"avg_line_length": 47.87376237623762,
"alnum_prop": 0.6022956413835893,
"repo_name": "IlyaGusev/rnnmorph",
"id": "0079a23bcf22a929e2832ec8de626270a4bbc4a4",
"size": "19993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rnnmorph/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91880"
},
{
"name": "Shell",
"bytes": "249"
}
],
"symlink_target": ""
} |
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# from preprocessing import cifarnet_preprocessing
# from preprocessing import inception_preprocessing
# from preprocessing import vgg_preprocessing
from preprocessing import ssd_vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
    """Return a preprocessing function for the named model family.

    Args:
      name: The name of the preprocessing function (e.g. 'ssd_300_vgg').
      is_training: `True` if the model is being used for training.

    Returns:
      preprocessing_fn: A function that preprocesses a single image
      (pre-batch), with signature:
        image = preprocessing_fn(image, labels, bboxes, out_shape, ...).

    Raises:
      ValueError: If preprocessing `name` is not recognized.
    """
    # Both SSD variants share the same preprocessing module.
    known_preprocessors = {
        'ssd_300_vgg': ssd_vgg_preprocessing,
        'ssd_512_vgg': ssd_vgg_preprocessing,
    }
    if name not in known_preprocessors:
        raise ValueError('Preprocessing name [%s] was not recognized' % name)
    # Resolve the module once; the closure captures it together with
    # the `is_training` flag.
    preprocessor = known_preprocessors[name]

    def preprocessing_fn(image, labels, bboxes,
                         out_shape, data_format='NHWC', **kwargs):
        return preprocessor.preprocess_image(
            image, labels, bboxes, out_shape, data_format=data_format,
            is_training=is_training, **kwargs)

    return preprocessing_fn
| {
"content_hash": "663f4b78c36ca2e2aa610b390129bdaf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 33.06521739130435,
"alnum_prop": 0.6870479947403024,
"repo_name": "LevinJ/SSD_tensorflow_VOC",
"id": "869784d4296e82e77873dba757616981e6215909",
"size": "2206",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "preprocessing/preprocessing_factory.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "408466"
}
],
"symlink_target": ""
} |
"""Autogenerated file, do not edit. Submit translations on Transifex."""
MESSAGES = {
"%d min remaining to read": "%d min demorantas de lectura",
"(active)": "(actiu)",
"Also available in:": "Tanben disponibla en :",
"Archive": "Archius",
"Atom feed": "Flux Atom",
"Authors": "Autors",
"Categories": "Categorias",
"Comments": "Comentaris",
"LANGUAGE": "Occitan",
"Languages:": "Lengas :",
"More posts about %s": "Mai de publicacion sus %s",
"Newer posts": "Publicacions mai recentas",
"Next post": "Publicacion seguenta",
"Next": "Seguent",
"No posts found.": "Cap de publicacion pas trobada.",
"Nothing found.": "Res pas trobat.",
"Older posts": "Publicacions mai ancianas",
"Original site": "Site original",
"Posted:": "Publicada lo :",
"Posts about %s": "Publicacions tocant %s",
"Posts by %s": "Publicat per %s",
"Posts for year %s": "Publicacions per annada %s",
"Posts for {month_day_year}": "Publicacions per {month_day_year}",
"Posts for {month_year}": "Publicacions per {month_year} ",
"Previous post": "Publicacion precedenta",
"Previous": "Precedent",
"Publication date": "Data de publicacion",
"RSS feed": "Flux RSS",
"Read in English": "Legir en occitan",
"Read more": "Ne legir mai",
"Skip to main content": "Passar al contengut màger",
"Source": "Font",
"Subcategories:": "Jos-categorias :",
"Tags and Categories": "Etiquetas e categorias",
"Tags": "Etiquetas",
"Toggle navigation": "Alternar la navigacion",
"Uncategorized": "Sens categoria",
"Up": "Amont",
"Updates": "Actualizacions",
"Write your page here.": "Escrivètz vòstra pagina aquí.",
"Write your post here.": "Escrivètz vòstra publicacion aquí.",
"old posts, page %d": "publicacions ancianas, pagina %d",
"page %d": "pagina %d",
"updated": "actualizada",
}
| {
"content_hash": "713dbc57ff100594e2ad6f24bd99307d",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 72,
"avg_line_length": 39.8125,
"alnum_prop": 0.6227106227106227,
"repo_name": "getnikola/nikola",
"id": "a51a9f3ffde96c3891960b196558235e39f0645b",
"size": "1943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/data/themes/base/messages/messages_oc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34036"
},
{
"name": "HTML",
"bytes": "239"
},
{
"name": "JavaScript",
"bytes": "2076"
},
{
"name": "Jupyter Notebook",
"bytes": "568"
},
{
"name": "Python",
"bytes": "1299776"
},
{
"name": "Shell",
"bytes": "9704"
},
{
"name": "XSLT",
"bytes": "3619"
}
],
"symlink_target": ""
} |
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import (
build_policy_assignments_create_by_id_request,
build_policy_assignments_create_request,
build_policy_assignments_delete_by_id_request,
build_policy_assignments_delete_request,
build_policy_assignments_get_by_id_request,
build_policy_assignments_get_request,
build_policy_assignments_list_for_resource_group_request,
build_policy_assignments_list_for_resource_request,
build_policy_assignments_list_request,
build_policy_definitions_create_or_update_request,
build_policy_definitions_delete_request,
build_policy_definitions_get_request,
build_policy_definitions_list_request,
)
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PolicyAssignmentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.resource.policy.v2015_10_01_preview.aio.PolicyClient`'s
:attr:`policy_assignments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace_async
    async def delete(self, scope: str, policy_assignment_name: str, **kwargs: Any) -> _models.PolicyAssignment:
        """Deletes a policy assignment.

        :param scope: The scope of the policy assignment. Required.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to delete. Required.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default HTTP-status -> exception mapping used by map_error below;
        # callers may extend or override it by passing ``error_map`` in kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        # API version can be overridden via kwargs or query parameters.
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignment]
        # Build the HTTP request from this operation's URL template.
        request = build_policy_assignments_delete_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        # Send the request through the client's async pipeline.
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Deserialize the response body into the PolicyAssignment model.
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            # A custom ``cls`` callback also receives the raw pipeline response.
            return cls(pipeline_response, deserialized, {})
        return deserialized

    delete.metadata = {"url": "/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}"} # type: ignore
    # Typing-only overload: model-typed ``parameters`` body.
    @overload
    async def create(
        self,
        scope: str,
        policy_assignment_name: str,
        parameters: _models.PolicyAssignment,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group.

        :param scope: The scope of the policy assignment. Required.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment. Required.
        :type policy_assignment_name: str
        :param parameters: Parameters for the policy assignment. Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Typing-only overload: raw IO stream body.
    @overload
    async def create(
        self,
        scope: str,
        policy_assignment_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group.

        :param scope: The scope of the policy assignment. Required.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment. Required.
        :type policy_assignment_name: str
        :param parameters: Parameters for the policy assignment. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Known values are: 'application/json', 'text/json'. Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Runtime implementation: dispatches on the actual type of ``parameters``.
    @distributed_trace_async
    async def create(
        self, scope: str, policy_assignment_name: str, parameters: Union[_models.PolicyAssignment, IO], **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group.

        :param scope: The scope of the policy assignment. Required.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment. Required.
        :type policy_assignment_name: str
        :param parameters: Parameters for the policy assignment. Is either a model type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json',
         'text/json'. Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None)) # type: Optional[str]
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignment]
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Raw streams/bytes are sent as-is; models are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "PolicyAssignment")
        request = build_policy_assignments_create_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 201 Created is the only success status for this operation.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    create.metadata = {"url": "/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}"} # type: ignore
    @distributed_trace_async
    async def get(self, scope: str, policy_assignment_name: str, **kwargs: Any) -> _models.PolicyAssignment:
        """Gets a policy assignment.

        :param scope: The scope of the policy assignment. Required.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to get. Required.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignment]
        # Build the HTTP request from this operation's URL template.
        request = build_policy_assignments_get_request(
            scope=scope,
            policy_assignment_name=policy_assignment_name,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        # Send the request through the client's async pipeline.
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {"url": "/{scope}/providers/Microsoft.Authorization/policyassignments/{policyAssignmentName}"} # type: ignore
    @distributed_trace
    def list_for_resource_group(
        self, resource_group_name: str, filter: Optional[str] = None, **kwargs: Any
    ) -> AsyncIterable["_models.PolicyAssignment"]:
        """Gets policy assignments for the resource group.

        :param resource_group_name: The name of the resource group that contains policy assignments.
         Required.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignment or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignmentListResult]
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from the operation template.
            # Subsequent pages: follow the service-supplied continuation link.
            if not next_link:
                request = build_policy_assignments_list_for_resource_group_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list_for_resource_group.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Turn one page of results into (continuation-token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; raise mapped errors on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_for_resource_group.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments"} # type: ignore
    @distributed_trace
    def list_for_resource(
        self,
        resource_group_name: str,
        resource_provider_namespace: str,
        parent_resource_path: str,
        resource_type: str,
        resource_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.PolicyAssignment"]:
        """Gets policy assignments for a resource.

        :param resource_group_name: The name of the resource group containing the resource. The name is
         case insensitive. Required.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider. Required.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource path. Required.
        :type parent_resource_path: str
        :param resource_type: The resource type. Required.
        :type resource_type: str
        :param resource_name: The name of the resource with policy assignments. Required.
        :type resource_name: str
        :param filter: The filter to apply on the operation. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignment or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignmentListResult]
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from the operation template.
            # Subsequent pages: follow the service-supplied continuation link.
            if not next_link:
                request = build_policy_assignments_list_for_resource_request(
                    resource_group_name=resource_group_name,
                    resource_provider_namespace=resource_provider_namespace,
                    parent_resource_path=parent_resource_path,
                    resource_type=resource_type,
                    resource_name=resource_name,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list_for_resource.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Turn one page of results into (continuation-token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; raise mapped errors on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list_for_resource.metadata = {"url": "/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyassignments"} # type: ignore
    @distributed_trace
    def list(self, filter: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.PolicyAssignment"]:
        """Gets all the policy assignments for a subscription.

        :param filter: The filter to apply on the operation. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignment or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignmentListResult]
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page: build the request from the operation template.
            # Subsequent pages: follow the service-supplied continuation link.
            if not next_link:
                request = build_policy_assignments_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url) # type: ignore
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Turn one page of results into (continuation-token, items).
            deserialized = self._deserialize("PolicyAssignmentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch a single page; raise mapped errors on non-200 responses.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyassignments"} # type: ignore
    @distributed_trace_async
    async def delete_by_id(self, policy_assignment_id: str, **kwargs: Any) -> _models.PolicyAssignment:
        """Deletes a policy assignment by ID.

        When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
        subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
        resource groups, and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to delete. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
         Required.
        :type policy_assignment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Default HTTP-status -> exception mapping; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview")) # type: str
        cls = kwargs.pop("cls", None) # type: ClsType[_models.PolicyAssignment]
        # Build the HTTP request; the full assignment ID is the URL path.
        request = build_policy_assignments_delete_by_id_request(
            policy_assignment_id=policy_assignment_id,
            api_version=api_version,
            template_url=self.delete_by_id.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url) # type: ignore
        # Send the request through the client's async pipeline.
        pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    delete_by_id.metadata = {"url": "/{policyAssignmentId}"} # type: ignore
    # Typing-only overload: model-typed ``parameters`` body. The runtime
    # implementation follows these stubs.
    @overload
    async def create_by_id(
        self,
        policy_assignment_id: str,
        parameters: _models.PolicyAssignment,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment by ID.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group. When providing a scope
        for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
        and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to create. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
         Required.
        :type policy_assignment_id: str
        :param parameters: Parameters for policy assignment. Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Typing-only overload: raw IO stream body.
    @overload
    async def create_by_id(
        self, policy_assignment_id: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment by ID.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group. When providing a scope
        for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
        and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to create. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
         Required.
        :type policy_assignment_id: str
        :param parameters: Parameters for policy assignment. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Known values are: 'application/json', 'text/json'. Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace_async
    async def create_by_id(
        self, policy_assignment_id: str, parameters: Union[_models.PolicyAssignment, IO], **kwargs: Any
    ) -> _models.PolicyAssignment:
        """Creates a policy assignment by ID.

        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group. When providing a scope
        for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
        and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to create. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
         Required.
        :type policy_assignment_id: str
        :param parameters: Parameters for policy assignment. Is either a model type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json',
         'text/json'. Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend or override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PolicyAssignment]
        # JSON is assumed when the caller did not specify a Content-Type.
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Runtime dispatch for the two overloads above: raw IO/bytes bodies
        # are sent as-is, model instances are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "PolicyAssignment")
        request = build_policy_assignments_create_by_id_request(
            policy_assignment_id=policy_assignment_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_by_id.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # The service returns 201 Created on success; anything else is an error.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Relative URL: the full scope is embedded in policy_assignment_id itself.
    create_by_id.metadata = {"url": "/{policyAssignmentId}"}  # type: ignore
    @distributed_trace_async
    async def get_by_id(self, policy_assignment_id: str, **kwargs: Any) -> _models.PolicyAssignment:
        """Gets a policy assignment by ID.

        When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
        subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
        resource groups, and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.

        :param policy_assignment_id: The ID of the policy assignment to get. Use the format
         '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
         Required.
        :type policy_assignment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyAssignment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend or override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PolicyAssignment]
        request = build_policy_assignments_get_by_id_request(
            policy_assignment_id=policy_assignment_id,
            api_version=api_version,
            template_url=self.get_by_id.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyAssignment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # Relative URL: the full scope is embedded in policy_assignment_id itself.
    get_by_id.metadata = {"url": "/{policyAssignmentId}"}  # type: ignore
class PolicyDefinitionsOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.resource.policy.v2015_10_01_preview.aio.PolicyClient`'s
        :attr:`policy_definitions` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs) -> None:
        # The generated client injects its client/config/serializer/deserializer
        # either positionally or by keyword; consume whichever form was used.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    # Typing-only overload: accepts the request body as a PolicyDefinition model.
    @overload
    async def create_or_update(
        self,
        policy_definition_name: str,
        parameters: _models.PolicyDefinition,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.PolicyDefinition:
        """Creates or updates a policy definition.

        :param policy_definition_name: The name of the policy definition to create. Required.
        :type policy_definition_name: str
        :param parameters: The policy definition properties. Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    # Typing-only overload: accepts the request body as a raw IO stream.
    @overload
    async def create_or_update(
        self, policy_definition_name: str, parameters: IO, *, content_type: str = "application/json", **kwargs: Any
    ) -> _models.PolicyDefinition:
        """Creates or updates a policy definition.

        :param policy_definition_name: The name of the policy definition to create. Required.
        :type policy_definition_name: str
        :param parameters: The policy definition properties. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Known values are: 'application/json', 'text/json'. Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace_async
    async def create_or_update(
        self, policy_definition_name: str, parameters: Union[_models.PolicyDefinition, IO], **kwargs: Any
    ) -> _models.PolicyDefinition:
        """Creates or updates a policy definition.

        :param policy_definition_name: The name of the policy definition to create. Required.
        :type policy_definition_name: str
        :param parameters: The policy definition properties. Is either a model type or a IO type.
         Required.
        :type parameters: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json',
         'text/json'. Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend or override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PolicyDefinition]
        # JSON is assumed when the caller did not specify a Content-Type.
        content_type = content_type or "application/json"
        _json = None
        _content = None
        # Runtime dispatch for the two overloads above: raw IO/bytes bodies
        # are sent as-is, model instances are serialized to JSON.
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "PolicyDefinition")
        request = build_policy_definitions_create_or_update_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # The service returns 201 Created on success; anything else is an error.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyDefinition", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"}  # type: ignore

    @distributed_trace_async
    async def delete(  # pylint: disable=inconsistent-return-statements
        self, policy_definition_name: str, **kwargs: Any
    ) -> None:
        """Deletes a policy definition.

        :param policy_definition_name: The name of the policy definition to delete. Required.
        :type policy_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend or override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_policy_definitions_delete_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 and 204 are both success: 204 indicates there was nothing to delete.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"}  # type: ignore

    @distributed_trace_async
    async def get(self, policy_definition_name: str, **kwargs: Any) -> _models.PolicyDefinition:
        """Gets the policy definition.

        :param policy_definition_name: The name of the policy definition to get. Required.
        :type policy_definition_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyDefinition or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend or override the mapping via the "error_map" kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PolicyDefinition]
        request = build_policy_definitions_get_request(
            policy_definition_name=policy_definition_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PolicyDefinition", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions/{policyDefinitionName}"}  # type: ignore

    @distributed_trace
    def list(self, filter: Optional[str] = None, **kwargs: Any) -> AsyncIterable["_models.PolicyDefinition"]:
        """Gets all the policy definitions for a subscription.

        :param filter: The filter to apply on the operation. Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyDefinition or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2015_10_01_preview.models.PolicyDefinition]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2015-10-01-preview"))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PolicyDefinitionListResult]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Builds either the first-page request or a follow-up request from the
        # server-provided next_link URL.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_policy_definitions_list_request(
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        # Extracts (next_link, items) from one deserialized page.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("PolicyDefinitionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        # Fetches one page; called repeatedly by AsyncItemPaged.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policydefinitions"}  # type: ignore
| {
"content_hash": "c1a2a93e66692bcb773ab42824383714",
"timestamp": "",
"source": "github",
"line_count": 1194,
"max_line_length": 267,
"avg_line_length": 46.29564489112228,
"alnum_prop": 0.6397054832932323,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2ac5c095cd24898e9b445f04147e83ccaf4994d8",
"size": "55777",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2015_10_01_preview/aio/operations/_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import hashlib
from utils.dict import mget
from sanetime import time
from django.db import models as djmodels
class Event(object):
    """Mixin providing scheduled/actual timestamp handling for events.

    Public ``starts_at``/``ends_at`` (scheduled) and ``started_at``/``ended_at``
    (actual) properties each fall back to their counterpart when unset.  The
    underlying storage is ``_time_*`` timestamps plus a single ``_timezone``
    shared by all four fields.
    """
    @property
    def hub(self): return self.account.hub
    # Scheduled start; falls back to the actual start when not set.
    @property
    def starts_at(self): return self._starts_at or self._started_at
    @starts_at.setter
    def starts_at(self, starts_at): self._starts_at = starts_at
    # Scheduled end; falls back to the actual end when not set.
    @property
    def ends_at(self): return self._ends_at or self._ended_at
    @ends_at.setter
    def ends_at(self, ends_at): self._ends_at = ends_at
    # Actual start; falls back to the scheduled start when not set.
    @property
    def started_at(self): return self._started_at or self._starts_at
    @started_at.setter
    def started_at(self, started_at): self._started_at = started_at
    # Actual end; falls back to the scheduled end when not set.
    @property
    def ended_at(self): return self._ended_at or self._ends_at
    @ended_at.setter
    def ended_at(self, ended_at): self._ended_at = ended_at
    @property
    def duration_short_string(self):
        # Human-readable duration: minutes up to 90m, fractional hours above.
        if self.duration.m <= 90: return "%sm" % self.duration.m
        return "%.1fh" % self.duration.fh
    # Scheduled duration; None-safe: returns a falsy value when either
    # endpoint is missing (the ``and`` chain short-circuits).
    @property
    def scheduled_duration(self): return self.starts_at and self.ends_at and (self.ends_at-self.starts_at)
    @scheduled_duration.setter
    def scheduled_duration(self, delta):
        # Silently ignored when there is no start to anchor the delta to.
        if self.starts_at: self.ends_at = self.starts_at + delta
    @property
    def actual_duration(self): return self.started_at and self.ended_at and (self.ended_at-self.started_at)
    @actual_duration.setter
    def actual_duration(self, delta):
        if self.started_at: self.ended_at = self.started_at + delta
    # ``duration`` is an alias for the scheduled duration.
    @property
    def duration(self): return self.scheduled_duration
    @duration.setter
    def duration(self, delta): self.scheduled_duration = delta
    # Internal accessors: each getter re-attaches the shared ``_timezone`` to
    # the stored timestamp; each setter stores the timestamp and overwrites
    # ``_timezone`` with the value's zone (last setter wins for all fields).
    @property
    def _starts_at(self): return self._time_starts_at and self._timezone and time(self._time_starts_at.us, self._timezone)
    @_starts_at.setter
    def _starts_at(self, starts_at):
        if starts_at is None:
            self._time_starts_at = None
        else:
            self._time_starts_at = time(starts_at.us)
            self._timezone = starts_at.tz_name
    @property
    def _ends_at(self): return self._time_ends_at and self._timezone and time(self._time_ends_at.us, self._timezone)
    @_ends_at.setter
    def _ends_at(self, ends_at):
        if ends_at is None:
            self._time_ends_at = None
        else:
            self._time_ends_at = time(ends_at.us)
            self._timezone = ends_at.tz_name
    @property
    def _started_at(self): return self._time_started_at and self._timezone and time(self._time_started_at.us, self._timezone)
    @_started_at.setter
    def _started_at(self, started_at):
        if started_at is None:
            self._time_started_at = None
        else:
            self._time_started_at = time(started_at.us)
            self._timezone = started_at.tz_name
    @property
    def _ended_at(self): return self._time_ended_at and self._timezone and time(self._time_ended_at.us, self._timezone)
    @_ended_at.setter
    def _ended_at(self, ended_at):
        if ended_at is None:
            self._time_ended_at = None
        else:
            self._time_ended_at = time(ended_at.us)
            self._timezone = ended_at.tz_name
    @classmethod
    def calc_hashcode(kls, **kwargs):
        """Derive a stable 31-bit hash identifying the event.

        Prefers a remote/session/universal key; otherwise hashes
        title|starts|ends.
        """
        key = mget(kwargs,'remote_id', 'session_key', 'universal_key')
        if key:
            source = key
        else:
            title = mget(kwargs,'title','subject')
            # NOTE(review): when '_time_starts_at' is present its raw value is
            # used, while the 'starts_at' fallback uses `.us` — the two paths
            # can hash the same instant differently; confirm this is intended.
            us_starts_at = kwargs.get('_time_starts_at',kwargs.get('starts_at',time(0)).us)
            us_ends_at = kwargs.get('_time_ends_at',kwargs.get('ends_at',time(0)).us)
            source = '%s|%s|%s'%(title,us_starts_at,us_ends_at)
        # NOTE(review): hashlib.md5 requires bytes on Python 3; `source` is a
        # str here, so this code is Python-2-only as written.
        return int(int(hashlib.md5(source).hexdigest(),16)%2**31)
    def ensure_hashcode(self):
        # Recompute the hashcode from the instance's current attributes.
        self.hashcode = self.__class__.calc_hashcode(**self.__dict__)
| {
"content_hash": "297f7945fe34fcc006f320d445379631",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 125,
"avg_line_length": 35.56481481481482,
"alnum_prop": 0.6258786774277532,
"repo_name": "prior/webinars",
"id": "446246b5b968cfdb8e537c394f13c8db283e2713",
"size": "3841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webinars_web/webinars/models/mixins/event.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1671417"
},
{
"name": "Shell",
"bytes": "1175"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import hashlib
from google.appengine.ext import ndb
import model
import util
import config
class User(model.Base):
  """Datastore model for an application user account."""
  name = ndb.StringProperty(required=True)
  username = ndb.StringProperty(required=True)
  email = ndb.StringProperty(default='')
  auth_ids = ndb.StringProperty(repeated=True)
  active = ndb.BooleanProperty(default=True)
  admin = ndb.BooleanProperty(default=False)
  permissions = ndb.StringProperty(repeated=True)
  verified = ndb.BooleanProperty(default=False)
  token = ndb.StringProperty(default='')

  def has_permission(self, perm):
    """Return True if the user is an admin or holds the named permission."""
    return self.admin or perm in self.permissions

  def avatar_url_size(self, size=None):
    """Return the user's Gravatar URL, optionally at a fixed pixel size.

    The hash is derived from the email, falling back to the username.
    """
    # Fix: the original compared `size > 0` even when size is None, which
    # raises TypeError on Python 3. Only positive sizes emit the parameter,
    # matching the original behavior for None/0/negative values.
    # Fix: md5 requires bytes on Python 3, so encode explicitly.
    return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
        'hash': hashlib.md5(
            (self.email or self.username).encode('utf-8')).hexdigest(),
        'size': '&s=%d' % size if size and size > 0 else '',
    }

  # Default avatar URL (no explicit size).
  avatar_url = property(avatar_url_size)

  _PROPERTIES = model.Base._PROPERTIES.union({
      'active',
      'admin',
      'auth_ids',
      'avatar_url',
      'email',
      'name',
      'permissions',
      'username',
      'verified',
  })

  @classmethod
  def get_dbs(
      cls, admin=None, active=None, verified=None, permissions=None, **kwargs
  ):
    """Query users, with each filter falling back to the request parameter.

    NOTE: a falsy explicit argument (e.g. admin=False) defers to the
    request parameter because of the `or` fallback — pre-existing behavior.
    """
    return super(User, cls).get_dbs(
        admin=admin or util.param('admin', bool),
        active=active or util.param('active', bool),
        verified=verified or util.param('verified', bool),
        permissions=permissions or util.param('permissions', list),
        **kwargs
    )

  @classmethod
  def is_username_available(cls, username, self_key=None):
    """Return True if *username* is unused (ignoring the user at self_key)."""
    if self_key is None:
      return cls.get_by('username', username) is None
    # Fetch up to two matches: available iff none exist, or the only one
    # belongs to the user being edited.
    user_keys, _ = util.get_keys(cls.query(), username=username, limit=2)
    return not user_keys or self_key in user_keys and not user_keys[1:]

  @classmethod
  def is_email_available(cls, email, self_key=None):
    """Return True if *email* is unused among verified accounts.

    Always True when unique-email checking is disabled in the app config.
    """
    if not config.CONFIG_DB.check_unique_email:
      return True
    user_keys, _ = util.get_keys(
        cls.query(), email=email, verified=True, limit=2,
    )
    return not user_keys or self_key in user_keys and not user_keys[1:]
| {
"content_hash": "1aa0758672dd0bf5301b5207494b2eac",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 77,
"avg_line_length": 30.422535211267604,
"alnum_prop": 0.6606481481481481,
"repo_name": "mdxs/gae-init-docs",
"id": "ff3706d4ec3e110ac32905e258b09148a419bd2a",
"size": "2177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main/model/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11008"
},
{
"name": "CoffeeScript",
"bytes": "14405"
},
{
"name": "Python",
"bytes": "70329"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from django import forms
from .widgets import RangeWidget, LookupTypeWidget
class RangeField(forms.MultiValueField):
    """Two decimal inputs (min, max) compressed into a ``slice`` object."""
    widget = RangeWidget

    def __init__(self, *args, **kwargs):
        subfields = (
            forms.DecimalField(),
            forms.DecimalField(),
        )
        super(RangeField, self).__init__(subfields, *args, **kwargs)

    def compress(self, data_list):
        """Collapse [start, stop] into slice(start, stop); None when empty."""
        return slice(*data_list) if data_list else None
class DateRangeCompareField(forms.MultiValueField):
    """Two date inputs (start, end) compressed into a ``slice`` object."""
    widget = RangeWidget

    def __init__(self, *args, **kwargs):
        subfields = (
            forms.DateField(),
            forms.DateField(),
        )
        super(DateRangeCompareField, self).__init__(subfields, *args, **kwargs)

    def compress(self, data_list):
        """Collapse [start, end] into slice(start, end); None when empty."""
        return slice(*data_list) if data_list else None
# (value, lookup_type) pair produced by LookupTypeField.compress();
# lookup_type defaults to 'exact' when not chosen.
Lookup = namedtuple('Lookup', ('value', 'lookup_type'))
class LookupTypeField(forms.MultiValueField):
    """Pair an arbitrary form field with a choice of lookup type.

    Cleans into a ``Lookup`` namedtuple of (value, lookup_type).
    """

    def __init__(self, field, lookup_choices, *args, **kwargs):
        subfields = (
            field,
            forms.ChoiceField(choices=lookup_choices),
        )
        kwargs['widget'] = LookupTypeWidget(
            widgets=[f.widget for f in subfields],
        )
        super(LookupTypeField, self).__init__(subfields, *args, **kwargs)

    def compress(self, data_list):
        """Collapse [value, lookup_type] into a Lookup; 'exact' by default."""
        if len(data_list) == 2:
            value, lookup = data_list
            return Lookup(value=value, lookup_type=lookup or 'exact')
        return Lookup(value=None, lookup_type='exact')
| {
"content_hash": "d6cf73ac094f315fd3a61700f68bbfab",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 82,
"avg_line_length": 30.142857142857142,
"alnum_prop": 0.601303317535545,
"repo_name": "sanmugamk/django-filter",
"id": "0a791a727302cc696e535d82667fae4a584e223b",
"size": "1688",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_filters/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "73"
},
{
"name": "Python",
"bytes": "129570"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import os
import re
import sqlite3
import qrcode
import codecs
import argparse
from urllib import quote, quote_plus
# Default filename of the Google Authenticator SQLite database.
default_db = 'databases'
# https://code.google.com/p/google-authenticator/wiki/KeyUriFormat
def make_qrimage(url):
    """Build and return a qrcode.QRCode object encoding *url*."""
    code = qrcode.QRCode()
    code.add_data(url)
    code.make()
    return code
def query_db(args):
    """Open the authenticator database and return a cursor over accounts.

    :param args: parsed CLI arguments; uses ``args.db`` (database path) and
        ``args.id`` (optional account ``_id`` to restrict the query to).
    :return: a sqlite3 cursor yielding ``sqlite3.Row`` objects, ordered by
        ``_id``.
    """
    conn = sqlite3.connect(args.db)
    # Row factory gives callers name-based column access (row['email']).
    conn.row_factory = sqlite3.Row
    c = conn.cursor()
    qry = '''SELECT * FROM accounts '''
    params = ()
    if args.id:
        # Parameterized placeholder instead of str.format() — the original
        # interpolated the CLI value directly into the SQL text, which is an
        # injection vector.
        qry += ''' WHERE _id=?'''
        params = (args.id,)
    qry += ''' ORDER BY _id'''
    return c.execute(qry, params)
def list_entries(args):
    """Print an ID/issuer/email table for every account in the database."""
    fmt = "% 3s: %-20s: %s"
    title = fmt % ('ID', 'Issuer', 'e-mail')
    print("%s\n%s" % (title, '-' * len(title)))
    for row in query_db(args):
        # Older database schemas only populate original_name.
        issuer = row['issuer'] if row['issuer'] is not None else row['original_name']
        print(fmt % (row['_id'], issuer, row['email']))
def create_otpauth_url(atype, secret, email, **kwds):
    """Build an otpauth:// provisioning URL (Google Authenticator key format).

    :param atype: 'totp'/'hotp', or the integer type stored in the GA
        database (0 == totp, 1 == hotp).
    :param secret: the base32 shared secret.
    :param email: the account label; '@' and '/' are kept unescaped.
    :param kwds: optional 'period'/'digits' (totp), 'counter' (hotp) and
        'issuer' parameters.
    :raises ValueError: for an integer type other than 0 or 1.
    """
    if isinstance(atype, int):
        if atype == 0:
            atype = 'totp'
        elif atype == 1:
            # Bug fix: this line was `atype == 'hotp'` (a comparison, not an
            # assignment), so hotp entries kept the integer type and crashed
            # on the string concatenation below.
            atype = 'hotp'
        else:
            raise ValueError("Invalid OTP type '%s'" % atype)
    url = "otpauth://" + atype + "/" + quote(email, safe="/@") + "?secret=" + secret
    # str() coercions below accept the integer values read from the database.
    if atype == 'totp':
        if 'period' in kwds:
            url += '&period=' + str(kwds['period'])
        if 'digits' in kwds:
            url += '&digits=' + str(kwds['digits'])
    elif atype == 'hotp':
        if 'counter' in kwds:
            url += '&counter=' + str(kwds['counter'])
    # Skip a None issuer (the database may have neither issuer nor name).
    if kwds.get('issuer') is not None:
        url += "&issuer=" + quote_plus(kwds['issuer'])
    return url
def process_db(args):
    """Dump the database per the CLI flags: list, URLs, ANSI QR, or PNG."""
    if args.list_entries:
        return list_entries(args)
    isatty = os.isatty(sys.stdout.fileno())
    if not isatty:
        # Stdout is not a terminal. Force it to be utf-8.
        sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
    resp = query_db(args)
    for row in resp:
        qrurl = create_otpauth_url(atype=row['type'], secret=row['secret'], email=row['email'],
                                   issuer=row['issuer'] if row['issuer'] is not None else row['original_name'],
                                   counter=row['counter'])
        if args.url_only:
            # NOTE(review): --url-only prints the URL but does not `continue`,
            # so a QR code is still generated (and --tty/--png still emit
            # output when combined) — confirm whether that is intended.
            print(qrurl)
        qr = make_qrimage(qrurl)
        if args.tty:
            print("%s: %s" % (row['issuer'] if row['issuer'] is not None else row['original_name'], row['email']))
            if isatty:
                # Interactive terminal: show one code at a time.
                qr.print_tty()
                print()
                raw_input('Press enter to continue...')  # Python-2-only builtin
            else:
                # Piped output: emit all codes as ASCII art.
                qr.print_ascii(out=sys.stdout, invert=True)
                print()
        if args.png:
            # Derive a filesystem-safe filename from the account email.
            outname = re.sub('[:/]', '_', row['email']) + '.png'
            print(outname)
            img = qr.make_image()
            img.save(outname)
def main():
    """Command-line entry point: parse arguments and dump the database."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--db", "--ga-db", help="Name of the Google Authenticator database file")
    ap.add_argument("--list-entries", action="store_true", help="List the entries in the database file.")
    ap.add_argument("--png", action="store_true", help="Save generated codes as PNG files.")
    ap.add_argument("--tty", action="store_true", help="Dump entries to console as ANSI graphics")
    ap.add_argument("--url-only", action="store_true", help="Only print the otpauth:// URLs.")
    ap.add_argument("--id", help="Dump only the one which has the id")
    args = ap.parse_args()
    args.db = args.db or default_db
    if not os.path.exists(args.db):
        print("Database file '%s' not exists." % args.db)
        sys.exit(1)
    process_db(args)
# Allow use both as a script and as an importable module.
if __name__ == '__main__':
    main()
| {
"content_hash": "385b889850cf2c5331f0be4bb0e6e12d",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 117,
"avg_line_length": 29.992125984251967,
"alnum_prop": 0.5571016014702022,
"repo_name": "folti/ga-qrdump",
"id": "890c1293e5d406d82cb0fea0c9acc26f82794ce5",
"size": "3831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ga-qrdump.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3831"
}
],
"symlink_target": ""
} |
from rauth import OAuth1Service, OAuth2Service
from flask import current_app, url_for, request, redirect, session
class OAuthSignIn(object):
    """Base class for provider-specific OAuth sign-in flows.

    ``get_provider`` lazily instantiates one object per subclass and caches
    them by their ``provider_name``.
    """

    # Lazily-built cache: provider_name -> provider instance.
    providers = None

    def __init__(self, provider_name):
        """Load this provider's OAuth credentials from the Flask config.

        :param provider_name: key into ``OAUTH_CREDENTIALS`` in app config.
        """
        self.provider_name = provider_name
        credentials = current_app.config['OAUTH_CREDENTIALS'][provider_name]
        self.consumer_id = credentials['id']
        self.consumer_secret = credentials['secret']

    def authorize(self):
        """Redirect the user to the provider's auth page (subclass hook)."""
        pass

    def callback(self):
        """Handle the provider's redirect back to us (subclass hook)."""
        pass

    def get_callback_url(self):
        """Absolute URL the provider should redirect to after authorization."""
        return url_for('oauth_callback', provider=self.provider_name,
                       _external=True)

    @classmethod
    def get_provider(cls, provider_name):
        """Return the cached sign-in instance for *provider_name*.

        On first use, one instance of every ``OAuthSignIn`` subclass is
        created and indexed by its provider name.
        """
        # Idiom fix: the first parameter of a classmethod is the class, so it
        # is named ``cls`` (it was previously misleadingly named ``self``).
        if cls.providers is None:
            cls.providers = {}
            for provider_class in cls.__subclasses__():
                provider = provider_class()
                cls.providers[provider.provider_name] = provider
        return cls.providers[provider_name]
class FacebookSignIn(OAuthSignIn):
    """OAuth2 sign-in flow against Facebook's Graph API."""

    def __init__(self):
        super(FacebookSignIn, self).__init__('facebook')
        self.service = OAuth2Service(
            name='facebook',
            client_id=self.consumer_id,
            client_secret=self.consumer_secret,
            authorize_url='https://www.facebook.com/dialog/oauth',
            access_token_url='https://graph.facebook.com/oauth/access_token',
            base_url='https://graph.facebook.com/'
        )

    def authorize(self):
        """Redirect the user to Facebook's OAuth dialog (email scope)."""
        return redirect(self.service.get_authorize_url(
            scope='email',
            response_type='code',
            redirect_uri=self.get_callback_url())
        )

    def callback(self):
        """Exchange the auth code and return (social_id, username, email).

        Returns (None, None, None) when the user denied the authorization
        request (no ``code`` query parameter).
        """
        if 'code' not in request.args:
            return None, None, None
        oauth_session = self.service.get_auth_session(
            data={'code': request.args['code'],
                  'grant_type': 'authorization_code',
                  'redirect_uri': self.get_callback_url()}
        )
        me = oauth_session.get('me?fields=id,email').json()
        email = me.get('email')
        # Facebook does not provide a username, so the email's local part is
        # used instead. Bug fix: the email can be absent when the user denies
        # the email permission — the original called .split() on None and
        # raised AttributeError; now both username and email fall back to None.
        username = email.split('@')[0] if email else None
        return (
            'facebook$' + me['id'],
            username,
            email
        )
"content_hash": "b11482b0c957b9f381d5e6ad53159030",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 77,
"avg_line_length": 34.544117647058826,
"alnum_prop": 0.5627926777352065,
"repo_name": "gretahuang/calhacks2015",
"id": "8ba2264356265dfd8094c6a57a5f787619d2b19e",
"size": "2349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oauth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "938"
},
{
"name": "HTML",
"bytes": "1408"
},
{
"name": "Python",
"bytes": "14792"
}
],
"symlink_target": ""
} |
import socketserver
import threading
import struct
#
# Class to handle receiving UDP data from XPlane
#
class XPlaneUDPHandler(socketserver.BaseRequestHandler):
    """Datagram handler that decodes X-Plane UDP data packets."""

    def handle(self):
        """Decode the received datagram and print the decoded values."""
        raw_data = self.request[0]
        unpacked_data = self.unpack_data(raw_data)
        print(unpacked_data)

    def unpack_data(self, buf):
        """Unpack the UDP payload into a dict.

        Returned dict format:
            {<Index in XPlane Data Options>: (Item0, ..., Item7)}

        Note: if the value of an item is -999 then that item is not used.

        The first 5 bytes of the packet (the X-Plane header prefix) are
        skipped; each record is one int index followed by 8 floats.
        """
        unpacked_data = {}
        # struct.iter_unpack never yields None, so the former
        # 'if item != None' guard was dead code and has been removed.
        for record in struct.iter_unpack('iffffffff', buf[5:]):
            unpacked_data[record[0]] = record[1:]
        return unpacked_data
#
# Class for threading mixin
#
class ThreadedUDPServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
    """UDP server that handles each incoming datagram on its own thread."""
    pass
#
# Server Class
#
class XPlaneUDPServer():
    """Owns a ThreadedUDPServer and runs its serve loop on a worker thread."""

    def __init__(self, host='0.0.0.0', port=48000):
        """Bind the UDP server on (host, port) and start serving immediately."""
        self.server = ThreadedUDPServer((host, port), XPlaneUDPHandler)
        worker = threading.Thread(target=self.server.serve_forever)
        worker.daemon = False  # non-daemon: keeps the process alive
        self.server_thread = worker
        self.server_thread.start()

    def shutdown(self):
        """Stop the serve_forever loop; the worker thread then exits."""
        self.server.shutdown()
| {
"content_hash": "54a4160bf3c74a6392eee31e2d94d1c5",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 92,
"avg_line_length": 20.365079365079364,
"alnum_prop": 0.7061574434918161,
"repo_name": "jbumanlag/XPlaneCockpit",
"id": "a7e41e2e19888f4a5bca9f1f33c26406874d1d02",
"size": "1396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "RPI2/XPlaneUDP/server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "1898"
},
{
"name": "Python",
"bytes": "8160"
}
],
"symlink_target": ""
} |
import binascii
import gzip
import os.path
import tempfile
import xml.etree.ElementTree as Et
from . import utils
STRIP_ATTRS = ['flags', 'source']
def create_imc_blob(xml_fname, dst_folder, dst_file, force):
    """Compile an IMC XML spec into a gzip blob embedded in a C++ header.

    Parameters:
        xml_fname: path to the source IMC XML specification.
        dst_folder: folder where the generated header is written.
        dst_file: file name of the generated header.
        force: when falsy, skip generation if the destination is up to date.

    Returns 1 on failure to create the destination folder, 0 otherwise.
    """
    dst_fname = os.path.join(dst_folder, dst_file)
    xml_md5 = utils.compute_md5(xml_fname)
    # Create destination folder.
    try:
        os.makedirs(dst_folder, exist_ok=True)
    except OSError as e:
        print('ERROR: failed to create destination folder: ' + e.strerror)
        return 1
    # Skip regeneration when the stored MD5 already matches the source XML.
    if not force and utils.file_md5_matches(dst_fname, xml_md5):
        print('* ' + dst_fname + ' [Skipped]')
        return 0
    # Parse XML specification.
    tree = Et.parse(xml_fname)
    # Remove 'description' elements and unneeded attributes.
    # BUG FIXES vs. the original:
    #  * the lazy map(lambda ...) calls were never consumed, so the
    #    STRIP_ATTRS attributes were never actually removed — use real loops;
    #  * ElementTree.getiterator() was removed in Python 3.9 — use iter();
    #  * removing a child while iterating the parent skips siblings —
    #    iterate over a copy of the children.
    for parent in tree.iter():
        for attr in STRIP_ATTRS:
            parent.attrib.pop(attr, None)
        for child in list(parent):
            for attr in STRIP_ATTRS:
                child.attrib.pop(attr, None)
            if child.tag == 'description':
                parent.remove(child)
    # Create output document.
    root = tree.getroot()
    text = b'<?xml version="1.0" encoding="UTF-8"?>\n' + Et.tostring(root, encoding='utf-8')
    # Compress to temporary file (mtime=0 keeps the output deterministic).
    tmp = tempfile.NamedTemporaryFile(delete=False)
    tmp.close()  # close the handle; we only need the reserved path name
    f_out = gzip.GzipFile(tmp.name, 'wb', compresslevel=9, mtime=0)
    f_out.write(text)
    f_out.close()
    # Create HPP file.
    hpp = utils.File(dst_file, dst_folder, ns=True, md5=xml_md5)
    hpp.add_isoc_headers('cstddef')
    # Byte array.
    hpp.append('const unsigned char c_imc_blob[] = \n{')
    octets = []
    with open(tmp.name, 'rb') as f_gz:
        h = binascii.hexlify(f_gz.read())
        octets += ['0x' + h[i: i + 2].decode() for i in range(0, len(h), 2)]
    os.remove(tmp.name)  # the temporary file was previously leaked
    octets_per_line = 12
    for i in range(0, len(octets), octets_per_line):
        ol = octets[i: i + octets_per_line]
        s = ','
        if i + octets_per_line >= len(octets):
            s = ''
        hpp.append(', '.join(ol) + s)
    hpp.append('};\n')
    hpp.append('class Blob')
    hpp.append('{')
    hpp.append('public:')
    # getData()
    f = utils.Function('getData', 'const unsigned char*', static=True)
    f.body('return c_imc_blob;')
    hpp.append(f)
    # getSize()
    f = utils.Function('getSize', 'size_t', static=True)
    f.body('return sizeof(c_imc_blob);')
    hpp.append(f)
    hpp.append('};')
    # Write files.
    hpp.write()
    return 0
| {
"content_hash": "fd1389209fe141ddbd0b13d4060a29c9",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 92,
"avg_line_length": 28.476744186046513,
"alnum_prop": 0.5928950592078399,
"repo_name": "oceanscan/imctrans",
"id": "3b833da7b9fff0400a308c7286d1865b87159a52",
"size": "3783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imctrans/cpp/blob.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1402835"
},
{
"name": "CMake",
"bytes": "8341"
},
{
"name": "Python",
"bytes": "52424"
}
],
"symlink_target": ""
} |
import asyncio
import collections
import logging
import time
from http.client import responses
from urllib.parse import urlparse
from urllib.parse import urlunparse
import aiohttp
from aiohttp import errors
from aiohttp import hdrs
from tsproxy import common
# Status codes whose responses never carry a body (see parse_headers below).
NO_CONTENT = 204
NOT_MODIFIED = 304
logger = logging.getLogger(__name__)
# Parsed HTTP request: parse results plus the raw request line, the split
# URL, buffered body bytes, any parse error, and receive timestamp.
RequestMessage = collections.namedtuple(
    'RequestMessage',
    ['method', 'path', 'version', 'headers', 'raw_headers',
     'should_close', 'compression', 'request_line', 'url', 'body', 'error', 'request_time'])
# Decomposed request URL (scheme/netloc/hostname/port/path/query).
RequestURL = collections.namedtuple(
    'RequestURL',
    ['full_url', 'full_path', 'scheme', 'netloc', 'hostname', 'port', 'path', 'query'])
# Parsed HTTP response: status-line parts, headers, body framing info
# (chunked / content_length), raw bytes, any parse error, and timestamp.
ResponseMessage = collections.namedtuple(
    'ResponseMessage',
    ['version', 'code', 'reason', 'headers', 'raw_headers',
     'should_close', 'compression', 'chunked', 'content_length', 'response_line',
     'head_length', 'body', 'raw_data', 'error', 'response_time'])
class HttpParser(aiohttp.protocol.HttpParser):
    """Shared base for request/response parsing on top of aiohttp's parser.

    Adds HTTP version parsing (with keep-alive defaults) and body framing
    detection (chunked transfer encoding vs. Content-Length).
    """

    def __init__(self, max_line_size=10240, max_headers=32768,
                 max_field_size=10240):
        super().__init__(max_line_size, max_headers, max_field_size)

    def _parse_version(self, version):
        """Parse 'HTTP/x.y' and return the default connection-close flag.

        Raises errors.BadStatusLine on any malformed version token.
        """
        try:
            if version.startswith('HTTP/'):
                n1, n2 = version[5:].split('.', 1)
                obj_version = aiohttp.HttpVersion(int(n1), int(n2))
            else:
                raise errors.BadStatusLine(version)
        except:
            raise errors.BadStatusLine(version)
        if obj_version <= aiohttp.protocol.HttpVersion10:  # HTTP/1.0 and older default to close
            close = True
        else:  # HTTP/1.1+ defaults to keep-alive
            close = False
        return close

    def parse_headers(self, lines, status=200, request_method='GET', default_close=True):
        """Parse header lines; return framing info alongside the headers.

        Returns (headers, raw_headers, close, compression, chunked, length)
        where length is the body's Content-Length or None when unknown.
        """
        headers, raw_headers, close, compression = super().parse_headers(lines)
        # are we using the chunked-style of transfer encoding?
        tr_enc = headers.get(hdrs.TRANSFER_ENCODING)
        if tr_enc and tr_enc.lower() == "chunked":
            chunked = True
        else:
            chunked = False
        # do we have a Content-Length?
        # NOTE: RFC 2616, S4.4, #3 says we ignore this if tr_enc is "chunked"
        content_length = headers.get(hdrs.CONTENT_LENGTH)
        if content_length and not chunked:
            try:
                length = int(content_length)
            except ValueError:
                length = None
            else:
                if length < 0:  # ignore nonsensical negative lengths
                    length = None
        else:
            length = None
        # Responses that can never have a body get a fixed length of zero:
        # 204, 304, 1xx, and any answer to a HEAD request.
        if length is None \
                and (status == NO_CONTENT or status == NOT_MODIFIED or 100 <= status < 200 or request_method == "HEAD"):
            length = 0
        # close may be None when the headers did not state a preference.
        return headers, raw_headers, default_close if close is None else close, compression, chunked, length
def bad_request(error=None, request_line=None, method=None, version=None, url=None, close=True, timeout=None, request_time=None):
    """Build a RequestMessage describing a request that failed to parse."""
    if timeout and not error:
        error = errors.BadHttpMessage('read request timeout(%d)' % timeout)
    full_path = url.full_path if url else None
    stamp = request_time if request_time else time.time()
    return RequestMessage(
        method=method, path=full_path, version=version,
        headers=None, raw_headers=None, should_close=close, compression=None,
        request_line=request_line, url=url, body=None,
        error=error, request_time=stamp)
# ['version', 'code', 'reason', 'headers', 'raw_headers',
# 'should_close', 'compression', 'chunked', 'content_length', 'response_line',
# 'head_length', 'var', 'body', 'raw_data', 'error'])
def bad_response(error, response_line=None, raw_data=b'', timeout=None):
    """Build a ResponseMessage describing a response that failed to parse."""
    if timeout and not error:
        error = errors.BadHttpMessage('read response timeout(%d)' % timeout)
    head_length = len(raw_data)
    return ResponseMessage(
        version=None, code=error.code, reason=error.message,
        headers=None, raw_headers=None, should_close=None, compression=None,
        chunked=False, content_length=None, response_line=response_line,
        head_length=head_length, body=None, raw_data=raw_data,
        error=error, response_time=time.time())
class HttpRequestParser(HttpParser):
    """Incremental parser for HTTP requests read from a stream reader."""

    def parse_request(self, reader, raw_data=b'', read_timeout=common.default_timeout):
        """Coroutine: read request line + headers (+ any buffered body bytes).

        Returns a RequestMessage; returns None on EOF, and a bad_request(...)
        message on parse errors, timeouts, or over-long lines.
        """
        # read HTTP message (request line + headers)
        request_time = time.time()
        try:
            with common.Timeout(read_timeout):
                if raw_data and raw_data.find(b'\r\n') > 0:
                    pass
                else:
                    raw_data += yield from reader.readuntil(b'\r\n')
                request_line, _ = raw_data.split(b'\r\n', 1)
                request_line = request_line.decode('utf-8', 'surrogateescape')
                method, version, url, close = self._parse_requestline(request_line)
                # Keep reading until the blank line that terminates the head.
                while True:
                    end_index = raw_data.find(b'\r\n\r\n')
                    if raw_data and end_index > 0:
                        header_lines = raw_data[:end_index+4]
                        body = raw_data[end_index+4:]
                        break
                    else:
                        raw_data += yield from reader.readuntil(b'\r\n')
                _request = self._parse_request(header_lines, request_line, method, version, url, close, request_time)
        except EOFError:
            return None
        except aiohttp.HttpProcessingError as bad_req:
            return bad_request(bad_req, request_line, request_time=request_time)
        # NOTE(review): in the two handlers below, request_line/method/version/
        # url/close may be unbound if the failure fired before the request line
        # was parsed, which would raise NameError here — confirm and fix.
        except (TimeoutError, asyncio.TimeoutError):
            return bad_request(errors.BadHttpMessage('read request timeout(%d)' % read_timeout), request_line, method, version, url, close, request_time=request_time)
        except asyncio.LimitOverrunError as exc:
            return bad_request(errors.LineTooLong('%s' % raw_data, exc.consumed), request_line, method, version, url, close, request_time=request_time)
        # Drain whatever body bytes are already buffered in the reader.
        chunk_len = len(reader)
        if chunk_len > 0:
            body += yield from reader.read_bytes(size=chunk_len)
        if len(body) > 0:
            _request = _request._replace(body=body)
        return _request

    def _parse_requestline(self, line):
        """Split a decoded request line into (method, version, url, close)."""
        try:
            method, path, version = line.split(None, 2)
        except ValueError:
            raise errors.BadStatusLine(line) from None
        # method
        method = method.upper()
        if not aiohttp.protocol.METHRE.match(method):
            raise errors.BadStatusLine(method)
        # version
        close = self._parse_version(version)
        # path
        url = self.parse_path(path, method)
        return method, version, url, close

    def _parse_request(self, raw_data, line, method, version, url, default_close=True, request_time=None):
        """Parse the head bytes into a RequestMessage (body left empty)."""
        lines = raw_data.split(b'\r\n')
        # read headers
        headers, raw_headers, close, compression, _, _ = self.parse_headers(lines, default_close=default_close)
        if close is None:  # then the headers weren't set in the request
            close = default_close
        return RequestMessage(
            method, url.full_path, version, headers, raw_headers,
            close, compression, line, url, b'', None, request_time if request_time else time.time())

    @staticmethod
    def parse_path(path, method):
        """Split a request-target into a RequestURL.

        For CONNECT the target is host:port (default port 443); otherwise it
        is an absolute or origin-form URL (default port 80).
        """
        url = urlparse(path)
        result = {'scheme': url.scheme if url.scheme else 'http' if url.netloc else '',
                  'netloc': url.netloc,
                  'path': url.path,
                  'query': url.query}
        result['full_url'] = urlunparse((result['scheme'], result['netloc'], url.path, url.params, url.query, url.fragment))
        result['full_path'] = urlunparse(('', '', url.path, url.params, url.query, url.fragment))
        if method == 'CONNECT':
            hostname, port = url.path.split(':')
            result['hostname'] = hostname
            result['port'] = int(port) if port else 443
        else:
            result['hostname'] = url.hostname if url.hostname else None
            result['port'] = int(url.port) if url.port else 80
        return RequestURL(**result)
class HttpResponseParser(HttpParser):
    """Read response status line and headers.

    BadStatusLine could be raised in case of any errors in status line.
    Returns RawResponseMessage"""

    def parse_response(self, raw_data, request_method='GET'):
        """Parse as much of a response as *raw_data* contains.

        Returns (ResponseMessage, consumed_bytes); returns (None, 0) when the
        head is not yet complete, and (bad_response(...), 0) on parse errors.
        """
        # read HTTP message (response line + headers)
        try:
            if raw_data and raw_data.find(b'\r\n') > 0:
                pass
            else:
                return None, 0
            response_line, _ = raw_data.split(b'\r\n', 1)
            response_line = response_line.decode('utf-8', 'surrogateescape')
            version, status, reason, default_close = self._parse_responseline(response_line)
            # A complete head ends with a blank line; otherwise wait for more.
            if raw_data and raw_data.find(b'\r\n\r\n') > 0:
                consumed = raw_data.find(b'\r\n\r\n') + 4
            else:
                return None, 0
            _response = self._parse_response(raw_data, response_line, version, status, reason, request_method, default_close)
        except aiohttp.HttpProcessingError as bad_req:
            return bad_response(bad_req, response_line, raw_data), 0
        # Attach whatever body bytes follow the head, capped at Content-Length.
        body_len = len(raw_data) - consumed
        if body_len > 0:
            if _response.content_length is not None and body_len > _response.content_length:
                body_len = _response.content_length
            body = raw_data[consumed: consumed + body_len]
            _response = _response._replace(body=body)
            consumed += body_len
        return _response, consumed

    def _parse_response(self, raw_data, line, version, status, reason, request_method='GET', default_close=True):
        """Parse the head bytes into a ResponseMessage (body left empty)."""
        lines = raw_data.split(b'\r\n')
        # read headers
        headers, raw_headers, close, compression, chunked, length = self.parse_headers(lines, status, request_method, default_close)
        return ResponseMessage(
            version, status, reason.strip(), headers, raw_headers,
            close, compression, chunked, length, line,
            len(raw_data), b'', raw_data, None, time.time())

    def _parse_responseline(self, line):
        """Split a decoded status line into (version, status, reason, close)."""
        try:
            version, status = line.split(None, 1)
        except ValueError:
            raise errors.BadStatusLine(line) from None
        else:
            try:
                status, reason = status.split(None, 1)
            except ValueError:
                reason = ''  # reason phrase is optional
        # version
        close = self._parse_version(version)
        # The status code is a three-digit number
        try:
            status = int(status)
        except ValueError:
            raise errors.BadStatusLine(line) from None
        if status < 100 or status > 999:
            raise errors.BadStatusLine(line)
        return version, status, reason, close
# ['version', 'code', 'reason', 'headers', 'raw_headers',
# 'should_close', 'compression', 'chunked', 'content_length', 'response_line',
# 'head_length', 'var', 'body', 'raw_data', 'error'])
def https_proxy_response(version=None, headers=None):
    """Build the '200 Connection established' reply for CONNECT tunnels."""
    version = version or 'HTTP/1.1'
    response_line = '%s 200 Connection established' % version
    parts = [response_line.encode(),
             b'\r\nProxy-Agent: taige-Smart-Proxy/0.1.0\r\n']
    raw_headers = []
    for name in (headers or {}):
        value = headers[name]
        parts.append(('%s: %s\r\n' % (name, value)).encode())
        raw_headers.append((name, value))
    parts.append(b'\r\n')  # blank line terminating the head
    raw_data = b''.join(parts)
    return ResponseMessage(
        version, 200, 'Connection established', headers, raw_headers,
        True, False, False, None, response_line,
        len(raw_data), b'', raw_data, None, time.time())
def http_response(version=None, status=200, reason=None, headers=None, content=None):
    """Build a synthetic ResponseMessage with status line, headers and body.

    Parameters:
        version: HTTP version string, defaults to 'HTTP/1.1'.
        status: numeric status code (reason defaults to the standard phrase).
        headers: optional mapping of header name -> value.
        content: optional str body; a Content-Length header is added for it.
    """
    if not version:
        version = 'HTTP/1.1'
    if not reason:
        reason = responses[status] + '(TSP)'
    response_line = '%s %d %s' % (version, status, reason)
    raw_data = ('%s\r\n' % response_line).encode()
    raw_headers = []
    if headers:
        for key in headers:
            raw_data += ('%s: %s\r\n' % (key, headers[key])).encode()
            raw_headers.append((key, headers[key]))
    if content:
        raw_data += ('Content-Length: %d\r\n' % len(content)).encode()
        raw_data += b'\r\n' + content.encode()
    else:
        # BUG FIX: the blank line terminating the header block was previously
        # emitted only when a body was present, so body-less responses were
        # malformed (the head must always end with CRLF CRLF).
        raw_data += b'\r\n'
    return ResponseMessage(
        version, status, reason, headers, raw_headers,
        True, False, False, None if not content else len(content), response_line,
        len(raw_data), content.encode() if content else b'', raw_data, None, time.time())
def test():
    # Ad-hoc smoke test: feed a canned proxy-style request through the
    # request parser and dump the parsed fields via test_parse().
    request_text = (
        b'GET http://user:pass@pki.google.com/add?jp3.iss.tf&us1&hk2 HTTP/1.1\r\n'
        # b'GET http://user:pass@pki.google.com/GIAG2.crt;some_par?sdsf=sdf#some_fra HTTP/1.1\r\n'
        # b'HEAD /sdsdfs HTTP/1.1\r\n'
        # b'GET http://pki.google.com/GIAG2.crt;some_par?sdsf=sdf#some_fra HTTP/1.1\r\n'
        # b'CONNECT photos-thumb.dropbox.com:443 HTTP/1.1\r\n'
        b'Host: pki.google.com\r\n'
        b'Proxy-Connection: keep-alive\r\n'
        b'Accept: */*\r\n'
        b'User-Agent: ocspd/1.0.3\r\n'
        b'Accept-Language: zh-cn\r\n'
        b'Content-Length: 15\r\n'
        b'Accept-Encoding: gzip, deflate\r\n'
        b'Connection: keep-alive\r\n\r\n'
        b'safasdfa;jd;afd'
    )
    # NOTE(review): response_text is currently unused (the response-parsing
    # exercise below is commented out).
    response_text = (
        b'HTTP/1.1 400 Bad Request\r\n'
        b'Server: bfe/1.0.8.14\r\n'
        b'Date: Sat, 19 Mar 2016 05:07:02 GMT\r\n\r\n'
        b'AAAsdfsdfsdf'
    )
    parser = HttpRequestParser()
    # res = parser.request_parse(request_text, hostname='www.google.com', port=80)
    res = parser._parse_request(request_text, None, None, None, None)
    test_parse(res)
def test_parse(res):
print(res)
# print(res.error_code) # None (check this first)
# print(res.command) # "GET"
print(res.path) # "/who/ken/trust.html"
print(res.version) # "HTTP/1.1"
print(len(res.headers)) # 3
# # print(request.headers.keys()) # ['accept-charset', 'host', 'accept']
key = 'Proxy-Connection'
if key in res.headers:
print('del %s => %s' % (key, res.headers[key]))
del res.headers[key]
for key in res.headers:
print('%s => %s' % (key, res.headers[key]))
# print(res.headers['host']) # "cm.bell-labs.com"
def test_unparse(res, parser):
    # NOTE(review): neither parser class in this module defines
    # unparse_request — this helper looks stale/broken; confirm before use.
    unrequest = parser.unparse_request(res)
    print("'" + unrequest.decode() + "'")
# res = parser.read_response(response_text, 'GET')
# print(res)
# print("'%s' '%d' '%s' '%d' '%s'" % (res.version, res.status, res.reason, res.head_length, res.headers))
# print("body='%s'" % response_text[res.head_length:].decode())
# request_text = (
# # b'GET http://pki.google.com/GIAG2.crt HTTP/1.1\r\n'
# b'CONNECT photos-thumb.dropbox.com:443 HTTP/1.1\r\n'
# b'Host: pki.google.com\r\n'
# b'Proxy-Connection: keep-alive\r\n'
# b'Accept: */*\r\n'
# b'User-Agent: ocspd/1.0.3\r\n'
# b'Accept-Language: zh-cn\r\n'
# b'Accept-Encoding: gzip, deflate\r\n'
# b'Connection: keep-alive\r\n\r\n'
# b'safasdfa;jd;afd'
# )
#
# request.do_parse(request_text)
#
# print(request.error_code) # None (check this first)
# print(request.command) # "GET"
# print(request.path) # "/who/ken/trust.html"
# print(request.request_version) # "HTTP/1.1"
# print(len(request.headers)) # 3
# # print(request.headers.keys()) # ['accept-charset', 'host', 'accept']
# for key in request.headers:
# print('%s => %s' % (key, request.headers[key]))
# print(request.headers['host']) # "cm.bell-labs.com"
# Manual smoke-test entry point.
if __name__ == '__main__':
    test()
| {
"content_hash": "37c9ac228c5c059cbbf7b21243fea0f8",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 166,
"avg_line_length": 39.37135922330097,
"alnum_prop": 0.5871401269958696,
"repo_name": "taige/PyTools",
"id": "4be9114872fc8fa19cf427713e3a98ea36ca5764",
"size": "16221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tsproxy/httphelper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "440164"
}
],
"symlink_target": ""
} |
'''GradientCanvas
This creates a canvas with a gradient background. The colors
of the gradient can be set with the set_colors method. The
axis can be set with set_axis, and must be "x" or "y".
I'm not presently using this, but thought it might be useful
for tooltips or the window background or something.
'''
import Tkinter as tk
class GradientCanvas(tk.Canvas):
    '''A standard canvas widget with a gradient background'''
    def __init__(self, *args, **kwargs):
        tk.Canvas.__init__(self, *args, **kwargs)
        # Default gradient: red fading to black along the x axis.
        self._color1 = "red"
        self._color2 = "black"
        self._axis = "x"
        # Redraw the gradient whenever the widget is resized.
        self.bind("<Configure>", self._draw_gradient)
    def set_axis(self, axis):
        '''Set the axis along which the gradient should be drawn

        axis must be "x" or "y"
        '''
        if axis.lower() not in ("x","y"):
            # ValueError is the conventional type for a bad argument value
            # (still caught by any caller handling the previous bare
            # Exception, since ValueError is an Exception subclass).
            raise ValueError("axis must be 'x' or 'y'")
        self._axis = axis.lower()
    def set_colors(self, color1, color2):
        '''Set the colors used for the gradient'''
        self._color1 = color1
        self._color2 = color2
    def _draw_gradient(self, event=None):
        '''Draw the gradient

        N.B. this takes from 2-70ms to complete, depending on the
        size of the canvas
        '''
        self.delete("gradient")
        width = self.winfo_width()
        height = self.winfo_height()
        if self._axis == "x":
            limit = width
        else:
            limit = height
        # Interpolate linearly between the two colors in 16-bit RGB space.
        (r1,g1,b1) = self.winfo_rgb(self._color1)
        (r2,g2,b2) = self.winfo_rgb(self._color2)
        r_ratio = float(r2-r1) / limit
        g_ratio = float(g2-g1) / limit
        b_ratio = float(b2-b1) / limit
        for i in range(limit):
            nr = int(r1 + (r_ratio * i))
            ng = int(g1 + (g_ratio * i))
            nb = int(b1 + (b_ratio * i))
            color = "#%4.4x%4.4x%4.4x" % (nr,ng,nb)
            if self._axis == "y":
                self.create_line(0,i,width,i,tags=("gradient",), fill=color)
            else:
                self.create_line(i,0,i,height, tags=("gradient",), fill=color)
        # Keep the gradient behind all other canvas items.
        self.lower("gradient")
# Manual demo: one horizontal and one vertical gradient, stacked.
if __name__ == "__main__":
    root = tk.Tk()
    canvas1 = GradientCanvas(root, width=200, height=200)
    canvas1.pack(side="top", fill="both", expand=True)
    canvas1.set_axis("x")
    canvas1.set_colors("#ffffff", "#000000")
    canvas2 = GradientCanvas(root, width=200, height=200)
    canvas2.pack(side="top", fill="both", expand=True)
    canvas2.set_axis("y")
    canvas2.set_colors("red", "black")
    root.mainloop()
| {
"content_hash": "95585076134f894f44bb16e20acfc97c",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 32.225,
"alnum_prop": 0.5616757176105508,
"repo_name": "boakley/robotframework-workbench",
"id": "c481dc66feee61cbe40623b57941f54958884eb5",
"size": "2578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rwb/widgets/gradient_canvas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "439210"
}
],
"symlink_target": ""
} |
import os
import sys
# Make the project root importable so Sphinx autodoc can find the package.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'gerrit-dashboard-nfv'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
"content_hash": "f4a41509581ff383321809d10eb1133d",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 79,
"avg_line_length": 31.098360655737704,
"alnum_prop": 0.6736953083816553,
"repo_name": "sbauza/gerrit-dashboard-nfv",
"id": "db552ac13203a9282fa88e6dd5af8a7bc6ad5937",
"size": "2467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "146465"
}
],
"symlink_target": ""
} |
'''Unit tests for the sinoera.ganzhi.xinmao13 module.

@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
import unittest
from sinoera.ganzhi import xinmao13
# BUG FIX: the 'class' keyword was missing, making this file a SyntaxError.
class TestXinmao13Functions(unittest.TestCase):
    """Placeholder test case for ganzhi.xinmao13 (no real assertions yet)."""

    def setUp(self):
        pass

    def test_XXX(self):
        pass
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "0e02c64841378f73c3df3a6d949dcbb7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 53,
"avg_line_length": 17.45,
"alnum_prop": 0.6790830945558739,
"repo_name": "sinotradition/sinoera",
"id": "6f17e62712f2e76fcf7f9a612e955969ee97909a",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sinoera/tst/ganzhi/test_xinmao13.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74484"
}
],
"symlink_target": ""
} |
from django.http import HttpResponse
import json
import datetime
class JSONResponse(HttpResponse):
    """HttpResponse that JSON-serializes its content and adds permissive
    CORS headers (any origin, POST/GET/OPTIONS)."""

    def __init__(self, content=b'', **kwargs):
        content = json.dumps(content, default=self.support_datetime_default, sort_keys=True)
        kwargs['content_type'] = 'application/json'
        super(JSONResponse, self).__init__(content, **kwargs)
        # Allow cross-origin access from any site.
        self['Access-Control-Allow-Origin'] = '*'
        self['Access-Control-Allow-Methods'] = 'POST, GET, OPTIONS'
        self['Access-Control-Max-Age'] = '1000'
        self['Access-Control-Allow-Headers'] = '*'

    @staticmethod
    def support_datetime_default(obj):
        """json.dumps fallback serializer for date/time objects.

        BUG FIX: datetime.datetime must be tested before datetime.date
        because datetime is a subclass of date — the previous ordering
        matched datetimes against the date branch first and silently
        dropped the time component.
        """
        if isinstance(obj, datetime.datetime):
            return "{0:%Y-%m-%d %H:%M:%S}".format(obj)
        elif isinstance(obj, datetime.date):
            return "{0:%Y-%m-%d}".format(obj)
        elif isinstance(obj, datetime.time):
            return "{0:%H:%M:%S}".format(obj)
        raise TypeError(repr(obj) + " is not JSON serializable")
class JSONResponseNotFound(JSONResponse):
    # Same JSON/CORS behavior as JSONResponse but with a 404 status code.
    status_code = 404
"content_hash": "9f0b669486cddea54842f3a861539c73",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 92,
"avg_line_length": 36.310344827586206,
"alnum_prop": 0.6229819563152896,
"repo_name": "zaubermaerchen/imas_cg_api",
"id": "e2d3a18daca7fdf33e66b713a188591576f035b0",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/response.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "511"
},
{
"name": "Python",
"bytes": "30235"
},
{
"name": "Shell",
"bytes": "20"
},
{
"name": "TSQL",
"bytes": "2535"
}
],
"symlink_target": ""
} |
"""Define version number here and read it from setup.py automatically"""
# Single source of truth for the package version string.
__version__ = "0.3.0"
| {
"content_hash": "4790b8b0ab5e97036ed0a5645261e34f",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 72,
"avg_line_length": 47.5,
"alnum_prop": 0.6947368421052632,
"repo_name": "quantumlib/OpenFermion-FQE",
"id": "ef9a9c7832a5f4cea6b5ce88996b8d674dc38326",
"size": "688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fqe/_version.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "219159"
},
{
"name": "Cython",
"bytes": "56133"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "1684470"
},
{
"name": "Shell",
"bytes": "5724"
}
],
"symlink_target": ""
} |
"""
@author: Ian
"""
import unittest
import os
import time
import numpy as np
import geosoft
import geosoft.gxapi as gxapi
import geosoft.gxpy.system as gsys
from base import GXPYTest
class Test(GXPYTest):
@classmethod
def tf(cls, f):
return os.path.join(os.path.dirname(cls._test_case_py), f)
def test_system(self):
self.start()
self.assertEqual(gsys.__version__, geosoft.__version__)
def test_call_location(self):
self.start()
f = gsys.call_location()
p = gsys.call_location(1)
self.assertIn("test_call_location", f)
self.assertIn("test_system.py", f)
self.assertIn("line 28", f)
self.assertIn("run", p)
def test_func(self):
self.start()
f = gsys.func_name()
p = gsys.func_name(2) # This may change with Python upgrades if the unittest.case code changes
self.assertEqual(f, "test_func")
self.assertEqual(p, "run")
def test_statics(self):
self.start()
app = gsys.app_name()
self.assertTrue(len(app) > 0)
func = gsys.func_name()
self.assertEqual(func, 'test_statics')
def test_unzip(self):
self.start()
folder, files = gsys.unzip(Test.tf('little.zip'), Test.tf('_test'), checkready=1)
self.assertEqual(len(files), 3)
self.assertTrue(os.path.isfile(folder + '\\little.png'))
self.assertTrue(os.path.isfile(folder + '\\little - Copy.png'))
self.assertTrue(os.path.isfile(folder + '\\little - Copy (2).png'))
gsys.remove_dir(folder, tries=0)
def test_task_range(self):
self.start()
nrecords = 200000000
nfields = 1
bufsize = 10000000 # this needs to be big enough to properly test the parallel implementation
data = np.arange(nrecords*nfields*3).reshape(nrecords, nfields, 3)
def get_record_count():
return data.shape[0]
def read_records(i,j):
return data[i:(i+j), :, :]
def validate(range_min,range_max):
self.assertEqual(range_min[0],data[0,0,0])
self.assertEqual(range_min[1],data[0,0,1])
self.assertEqual(range_min[2],data[0,0,2])
self.assertEqual(range_max[0],data[-1,0,0])
self.assertEqual(range_max[1],data[-1,0,1])
self.assertEqual(range_max[2],data[-1,0,2])
def reference_range(): # Roger's reference implementation
record_count = get_record_count()
if record_count < 1:
return
#first_point = kv.Project.read_records(src_name, 0, 1)
first_point = read_records(0, 1)
range_min = [first_point[0][0][0], first_point[0][0][1], first_point[0][0][2]]
range_max = range_min[:]
record_offset = 1
buf_size = min(bufsize, record_count)
while record_offset < record_count:
records_to_read = min(buf_size, record_count - record_offset)
# buf = kv.Project.read_records(src_name, record_offset, records_to_read)
buf = read_records(record_offset, records_to_read)
for i in range(records_to_read):
# kv.Thread.task_progress(record_offset / record_count * 100)
record_offset += 1
# if kv.Thread.is_cancelling():
# return
range_min[0] = min(buf[i][0][0], range_min[0])
range_min[1] = min(buf[i][0][1], range_min[1])
range_min[2] = min(buf[i][0][2], range_min[2])
range_max[0] = max(buf[i][0][0], range_max[0])
range_max[1] = max(buf[i][0][1], range_max[1])
range_max[2] = max(buf[i][0][2], range_max[2])
return range_min, range_max
def numpy_range(): # Minimum change numpy implementation, only replace inner loop
record_count = get_record_count()
if record_count < 1:
return
first_point = read_records(0, 1)
range_min = [first_point[0][0][0], first_point[0][0][1], first_point[0][0][2]]
range_max = range_min[:]
record_offset = 1
buf_size = min(bufsize, record_count)
while record_offset < record_count:
records_to_read = min(buf_size, record_count - record_offset)
buf = read_records(record_offset, records_to_read)
bx = buf[:,0,0]
by = buf[:,0,1]
bz = buf[:,0,2]
range_min = [min(range_min[0], np.nanmin(bx)),
min(range_min[1], np.nanmin(by)),
min(range_min[2], np.nanmin(bz))]
range_max = [max(range_max[0], np.nanmax(bx)),
max(range_max[1], np.nanmax(by)),
max(range_max[2], np.nanmax(bz))]
record_offset += buf.shape[0]
return range_min, range_max
    def numpy_parallel_range(): # parallel implementation
        """Same contract as numpy_range(), with the three axis reductions
        dispatched concurrently via gsys.parallel_map.

        Each chunk's x/y/z min/max updates run as three independent tasks;
        results come back as a list of (min, max) pairs in axis order.
        """
        record_count = get_record_count()
        if record_count < 1:
            return
        first_point = read_records(0, 1)
        range_min = [first_point[0][0][0], first_point[0][0][1], first_point[0][0][2]]
        range_max = range_min[:]
        record_offset = 1
        buf_size = min(bufsize, record_count)
        while record_offset < record_count:
            records_to_read = min(buf_size, record_count - record_offset)
            buf = read_records(record_offset, records_to_read)
            # arrange the problem for three parallel threads
            parallel = [(range_min[0], range_max[0], buf[:,0,0]),
                        (range_min[1], range_max[1], buf[:,0,1]),
                        (range_min[2], range_max[2], buf[:,0,2])]
            # run in parallel
            results = gsys.parallel_map(lambda a: (min(a[0], np.nanmin(a[2])), max(a[1], np.nanmax(a[2]))), parallel)
            # update the ranges from the results, which are in a list of (min,max)
            zip1,zip2 = zip(*results)
            range_min = list(zip1)
            range_max = list(zip2)
            record_offset += buf.shape[0]
        return range_min, range_max
def time_test(func, reference_time=None):
start = time.time()
range_min, range_max = func()
end = time.time()
# make sure result is correct
validate(range_min,range_max)
elapsed = end-start
if reference_time is None:
speed_improvement = 1
else:
speed_improvement = reference_time / elapsed
geosoft.gxpy.gx.gx().log("{}: time: {} seconds, {} times faster than reference".format(func.__name__, elapsed, speed_improvement))
return elapsed
#ref = time_test(reference_range)
ref = 0
ref = time_test(numpy_range, ref)
time_test(numpy_parallel_range, ref)
# testing documented parallel example
data = [(1+i, 2+i) for i in range(20)]
geosoft.gxpy.gx.gx().log('parallel:',gsys.parallel_map(lambda ab: ab[0] + ab[1], data))
###############################################################################################
if __name__ == '__main__':
    # Run the suite when this file is executed directly.
    unittest.main()
| {
"content_hash": "eefde44525ae9f7916cb754d131da103",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 142,
"avg_line_length": 36.20289855072464,
"alnum_prop": 0.5221510541766746,
"repo_name": "GeosoftInc/gxpy",
"id": "2a88566182533ab2df3594edef138fa3ef8fba8b",
"size": "7494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geosoft/gxpy/tests/test_system.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4799134"
}
],
"symlink_target": ""
} |
"""Test framework for merelcoin utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def main():
    """Entry point: load config.ini, parse CLI flags, run the util tests.

    Reads the ``environment`` section of ``../config.ini`` (relative to
    this file) for SRCDIR/BUILDDIR/EXEEXT, sets logging verbosity from the
    ``-v``/``--verbose`` flag, then delegates to bctester(), which exits
    the process with the overall pass/fail status.
    """
    config = configparser.ConfigParser()
    config.optionxform = str  # keep option names case-sensitive

    # Use a context manager so the handle is always closed, and read_file()
    # instead of the deprecated readfp(). (The encoding= kwarg already
    # requires Python 3, so the py3-only read_file() is safe here.)
    with open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8") as config_file:
        config.read_file(config_file)
    env_conf = dict(config.items('environment'))

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    verbose = args.verbose

    if verbose:
        level = logging.DEBUG
    else:
        level = logging.ERROR
    formatter = '%(asctime)s - %(levelname)s - %(message)s'
    # Add the format/level to the logger
    logging.basicConfig(format=formatter, level=level)

    bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "bitcoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
    """Loads and parses the input file, runs all tests and reports results.

    Each entry in the JSON file describes one test case (see bctest()).
    Exits the process: status 1 if any case failed, 0 if all passed.
    """
    input_filename = os.path.join(testDir, input_basename)
    # Close the test-definition file promptly instead of relying on GC.
    with open(input_filename, encoding="utf8") as input_file:
        input_data = json.load(input_file)

    failed_testcases = []

    for testObj in input_data:
        try:
            bctest(testDir, testObj, buildenv)
            logging.info("PASSED: " + testObj["description"])
        except Exception:
            # Catch only real errors — a bare `except:` would also swallow
            # KeyboardInterrupt/SystemExit and make the run uninterruptible.
            logging.info("FAILED: " + testObj["description"])
            failed_testcases.append(testObj["description"])

    if failed_testcases:
        error_message = "FAILED_TESTCASES:\n"
        error_message += pprint.pformat(failed_testcases, width=400)
        logging.error(error_message)
        sys.exit(1)
    else:
        sys.exit(0)
def bctest(testDir, testObj, buildenv):
    """Runs a single test, comparing output and RC to expected output and RC.

    Raises an error if input can't be read, executable fails, or output/RC
    are not as expected. Error is caught by bctester() and reported.
    """
    # Get the exec names and arguments
    execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
    execargs = testObj['args']
    execrun = [execprog] + execargs

    # Read the input data (if there is any)
    stdinCfg = None
    inputData = None
    if "input" in testObj:
        filename = os.path.join(testDir, testObj["input"])
        inputData = open(filename, encoding="utf8").read()
        stdinCfg = subprocess.PIPE

    # Read the expected output data (if there is any)
    outputFn = None
    outputData = None
    outputType = None
    if "output_cmp" in testObj:
        outputFn = testObj['output_cmp']
        outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
        try:
            outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
        except:
            logging.error("Output file " + outputFn + " can not be opened")
            raise
        if not outputData:
            logging.error("Output data missing for " + outputFn)
            raise Exception
        if not outputType:
            logging.error("Output file %s does not have a file extension" % outputFn)
            raise Exception

    # Run the test
    proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    try:
        # outs is the (stdout, stderr) pair from the finished process.
        outs = proc.communicate(input=inputData)
    except OSError:
        logging.error("OSError, Failed to execute " + execprog)
        raise

    if outputData:
        # Both semantic (parsed) and byte-for-byte (formatting) comparisons
        # are performed so either kind of regression is reported.
        data_mismatch, formatting_mismatch = False, False
        # Parse command output and expected output
        try:
            a_parsed = parse_output(outs[0], outputType)
        except Exception as e:
            logging.error('Error parsing command output as %s: %s' % (outputType, e))
            raise
        try:
            b_parsed = parse_output(outputData, outputType)
        except Exception as e:
            logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
            raise
        # Compare data
        if a_parsed != b_parsed:
            logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
            data_mismatch = True
        # Compare formatting
        if outs[0] != outputData:
            error_message = "Output formatting mismatch for " + outputFn + ":\n"
            error_message += "".join(difflib.context_diff(outputData.splitlines(True),
                                                          outs[0].splitlines(True),
                                                          fromfile=outputFn,
                                                          tofile="returned"))
            logging.error(error_message)
            formatting_mismatch = True
        assert not data_mismatch and not formatting_mismatch

    # Compare the return code to the expected return code
    wantRC = 0
    if "return_code" in testObj:
        wantRC = testObj['return_code']
    if proc.returncode != wantRC:
        logging.error("Return code mismatch for " + outputFn)
        raise Exception

    if "error_txt" in testObj:
        want_error = testObj["error_txt"]
        # Compare error text
        # TODO: ideally, we'd compare the strings exactly and also assert
        # That stderr is empty if no errors are expected. However, bitcoin-tx
        # emits DISPLAY errors when running as a windows application on
        # linux through wine. Just assert that the expected error text appears
        # somewhere in stderr.
        if want_error not in outs[1]:
            logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
            raise Exception
def parse_output(a, fmt):
    """Parse *a* according to *fmt* and return the parsed value.

    'json' returns the decoded object (for structural comparison);
    'hex' returns the decoded bytes (leading/trailing whitespace stripped).
    Raises NotImplementedError for any other format, and propagates parse
    errors from the underlying decoders.
    """
    if fmt == 'json': # json: compare parsed data
        return json.loads(a)
    if fmt == 'hex': # hex: parse and compare binary data
        return binascii.a2b_hex(a.strip())
    raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
    # Allow running the framework directly, outside of `make check`.
    main()
| {
"content_hash": "4ca72977db8fa2a18bba8249bddfdfbb",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 125,
"avg_line_length": 37.074285714285715,
"alnum_prop": 0.6205302096177558,
"repo_name": "merelcoin/merelcoin",
"id": "be27545f7b7360852e4ce3ada87058f531474d8c",
"size": "6727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/util/bitcoin-util-test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "694223"
},
{
"name": "C++",
"bytes": "6032230"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "196429"
},
{
"name": "Makefile",
"bytes": "2491551"
},
{
"name": "NSIS",
"bytes": "6834"
},
{
"name": "Objective-C",
"bytes": "6153"
},
{
"name": "Objective-C++",
"bytes": "6588"
},
{
"name": "Python",
"bytes": "1474453"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Roff",
"bytes": "2559606"
},
{
"name": "Shell",
"bytes": "886663"
}
],
"symlink_target": ""
} |
__author__ = "Learning Equality"
__email__ = "info@learningequality.org"
__version__ = "0.7.0"
import sys
if sys.version_info < (3, 6, 0):
raise RuntimeError("Ricecooker only supports Python 3.6+")
| {
"content_hash": "4208c7f51a0a5512ea6238cb3ab5472f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 22.77777777777778,
"alnum_prop": 0.6585365853658537,
"repo_name": "learningequality/ricecooker",
"id": "b075c69bc445fb9e24d40638bc67ffd9520b1e3c",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ricecooker/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1901"
},
{
"name": "HTML",
"bytes": "1065"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "2800"
},
{
"name": "Python",
"bytes": "704340"
},
{
"name": "Shell",
"bytes": "1713"
}
],
"symlink_target": ""
} |
import os
from textwrap import dedent
import unittest2 as unittest
from squarepants.build_component import JarFilesMixin
from squarepants.file_utils import temporary_dir
from squarepants.pom_file import PomFile
from squarepants.pom_utils import PomUtils
class BuildComponentTest(unittest.TestCase):
  """Tests for JarFilesMixin.format_jar_library output formatting.

  Uses assertEqual throughout: assertEquals is a deprecated alias that was
  removed in Python 3.12.
  """

  def setUp(self):
    """Show full diffs, remember the cwd, and reset PomUtils caches."""
    self.maxDiff = None
    self._wd = os.getcwd()
    PomUtils.reset_caches()

  def tearDown(self):
    # Restore the working directory
    os.chdir(self._wd)

  def test_format_jar_deps(self):
    """Jar deps are sorted, de-duplicated, and jar() entries kept intact."""
    # Dependencies should be sorted in alphabetical order
    self.assertEqual(dedent('''
      jar_library(name='jar_files',
        jars=[
          'bar',
          'baz',
          'foo'
        ],
      )
      '''),
      JarFilesMixin.format_jar_library('jar_files', ["'foo'", "'bar'", "'baz'"]))
    # Duplicates should be suppressed
    self.assertEqual(dedent('''
      jar_library(name='jar_files',
        jars=[
          'baz',
          'foo'
        ],
      )
      '''),
      JarFilesMixin.format_jar_library('jar_files', ["'foo'", "'foo'", "'baz'"]))
    # jar() entries shouldn't be truncated.
    # NB(zundel): this was a problem when this method was implemented with with Target.jar_format()
    self.assertEqual(dedent('''
      jar_library(name='jar_files',
        jars=[
          jar(org='square', name='foobar'),
          jar(org='square', name='qux', excludes=(org='com.example', name='cruft')),
          jar(org='square', name='zzz')
        ],
      )
      '''),
      JarFilesMixin.format_jar_library('jar_files', [
        "jar(org='square', name='qux', excludes=(org='com.example', name='cruft'))",
        "jar(org='square', name='foobar')",
        "jar(org='square', name='zzz')"
      ],))

  def test_format_jar_deps_symbols(self):
    """${...} property references are resolved via the parent pom chain."""
    with temporary_dir() as temp_path:
      parent_pom_contents = """<?xml version="1.0" encoding="UTF-8"?>
<project>
  <groupId>com.example</groupId>
  <artifactId>mock-parent</artifactId>
  <version>HEAD-SNAPSHOT</version>

  <properties>
    <foo>1.2.3</foo>
  </properties>
</project>
"""
      mock_parent_pom_filename = os.path.join(temp_path, 'pom.xml')
      with open(mock_parent_pom_filename, 'w') as f:
        f.write(parent_pom_contents)
      mock_path = os.path.join(temp_path, 'mock-project')
      os.mkdir(mock_path)
      mock_pom_filename = os.path.join(mock_path, 'pom.xml')
      pom_contents = """<?xml version="1.0" encoding="UTF-8"?>
<project>
  <groupId>com.example</groupId>
  <artifactId>mock</artifactId>
  <version>HEAD-SNAPSHOT</version>

  <parent>
    <groupId>com.example</groupId>
    <artifactId>mock-project</artifactId>
    <version>HEAD-SNAPSHOT</version>
    <relativePath>../pom.xml</relativePath>
  </parent>
</project>
"""
      with open(mock_pom_filename, 'w') as f:
        f.write(pom_contents)
      mock_pom_file = PomFile(mock_pom_filename)

      formatted_library = JarFilesMixin.format_jar_library('jar_files',
        ["jar(org='square', name='foobar', rev='${foo}')"],
        pom_file=mock_pom_file)
      # ${foo} comes from the parent pom's <properties> block.
      self.assertEqual(dedent('''
      jar_library(name='jar_files',
        jars=[
          jar(org='square', name='foobar', rev='1.2.3')
        ],
      )
      '''), formatted_library)
| {
"content_hash": "d79a82df081fdf167ecbc483504a58cc",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 106,
"avg_line_length": 39,
"alnum_prop": 0.45437745437745436,
"repo_name": "ericzundel/mvn2pants",
"id": "a378ce08cca2cd8a198585f7e2b21ff49883eaf3",
"size": "4499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/python/squarepants_test/test_build_component.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "283"
},
{
"name": "Python",
"bytes": "641401"
},
{
"name": "Shell",
"bytes": "240"
}
],
"symlink_target": ""
} |
from core.model.direction import Direction
class Board(object):
    """A rectangular 2048 board mapping (x, y) coordinates to tiles.

    Empty slots hold None. Coordinates run x in [0, width), y in [0, height).
    """

    def __init__(self, width=4, height=4):
        self.width = width
        self.height = height
        # BUGFIX: `tiles` used to be a *class* attribute, so every Board
        # instance shared (and silently reset) the same dict. It must be
        # a per-instance attribute.
        self.tiles = {}
        for x in range(self.width):
            for y in range(self.height):
                self.tiles[(x, y)] = None

    def unmark_merged_all(self):
        """Clear the already_merged flag on every tile (start of a move)."""
        for coord in self.tiles:
            if self.tiles[coord] is not None:
                self.tiles[coord].already_merged = False

    def is_full(self):
        """Return True when no slot is empty."""
        for coords in self.tiles:
            if self.tiles[coords] is None:
                return False
        return True

    def add(self, position, tile):
        """Place *tile* at *position*; raises if the slot is occupied."""
        if self.tiles[position] is not None:
            message = _("Slot with position %(pos)s already contains a tile")
            raise Exception(message % {"pos": str(position)})
        self.tiles[position] = tile

    def remove(self, position):
        """Empty the slot at *position*; raises if it is already empty."""
        if self.tiles[position] is None:
            message = _("The slot with this position %(pos)s is already empty")
            raise Exception(message % {"pos": str(position)})
        self.tiles[position] = None

    def move(self, old_position, new_position):
        """Relocate the tile at *old_position* to the empty *new_position*."""
        tile = self.tiles[old_position]
        self.remove(old_position)
        self.add(new_position, tile)

    def next_by_predicate(self, position, direction, predicate):
        """Return the first coordinate after *position* (moving toward
        *direction*) for which *predicate* holds, or None."""
        candidate = None
        if direction == Direction.Right:
            for i in range(position[0] + 1, self.width):
                if predicate((i, position[1])):
                    return (i, position[1])
        if direction == Direction.Left:
            for i in range(position[0] - 1, -1, -1):
                if predicate((i, position[1])):
                    return (i, position[1])
        if direction == Direction.Up:
            for i in range(position[1] - 1, -1, -1):
                if predicate((position[0], i)):
                    return (position[0], i)
        if direction == Direction.Down:
            for i in range(position[1] + 1, self.height):
                if predicate((position[0], i)):
                    return (position[0], i)
        return candidate

    def last_by_predicate(self, position, direction, predicate):
        """Return the farthest coordinate in *direction* (scanning from the
        board edge back toward *position*) satisfying *predicate*, or None."""
        candidate = None
        if direction == Direction.Right:
            for i in range(self.width - 1, position[0], -1):
                if predicate((i, position[1])):
                    return (i, position[1])
        if direction == Direction.Left:
            for i in range(0, position[0]):
                if predicate((i, position[1])):
                    return (i, position[1])
        if direction == Direction.Up:
            for i in range(0, position[1]):
                if predicate((position[0], i)):
                    return (position[0], i)
        if direction == Direction.Down:
            for i in range(self.height - 1, position[1], -1):
                if predicate((position[0], i)):
                    return (position[0], i)
        return candidate

    def next_free(self, position, direction):
        """Farthest empty slot from *position* toward *direction*, or None."""
        return self.last_by_predicate(position, direction, lambda x: self.tiles[x] is None)

    def next_full(self, position, direction):
        """Nearest occupied slot from *position* toward *direction*, or None."""
        return self.next_by_predicate(position, direction, lambda x: self.tiles[x] is not None)

    def get_empty_tiles(self):
        """Return the list of coordinates whose slot is empty."""
        return [coord for coord in self.tiles if self.tiles[coord] is None]
| {
"content_hash": "7ee426c0e4de384681ec30de3e2fd57e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 91,
"avg_line_length": 34.69230769230769,
"alnum_prop": 0.5399113082039911,
"repo_name": "the-dalee/gnome-2048",
"id": "aa657c0a3bb65c02c5f21d40a7f6034364612889",
"size": "3608",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/model/board.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2065"
},
{
"name": "Makefile",
"bytes": "1939"
},
{
"name": "Python",
"bytes": "43453"
},
{
"name": "Shell",
"bytes": "300"
}
],
"symlink_target": ""
} |
import pytest
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib/"))
import re
from dartsense.webapp import app as tl_app
tl_app.config['DEBUG'] = True
tl_app.config['TESTING'] = True
@pytest.fixture
def app(setup_db):
    # Fixture: expose the configured Flask app object. Depends on setup_db
    # (presumably prepares the test database — defined in conftest) so the
    # DB exists before any client request runs.
    return tl_app
def login(client):
    """Authenticate *client*: enable TEST_LOGIN, hit /user, replay the
    dartsense_session cookie back onto the test client."""
    tl_app.config['TEST_LOGIN'] = True
    response = client.get('/user', follow_redirects=True)
    # Extract the session id from the Set-Cookie response header.
    session_match = re.search(r'dartsense_session=([^;]+);',
                              response.headers['Set-Cookie'])
    client.set_cookie('localhost', 'dartsense_session', session_match.group(1))
def test_list_index(client):
    """/list (with and without trailing slash): 401 before login, 200 after."""
    for path in ('/list', '/list/'):
        assert client.get(path).status_code == 401
    login(client)
    for path in ('/list', '/list/'):
        assert client.get(path).status_code == 200
def test_list_competitions(client):
    """/list/competitions (with and without slash): 401 before login, 200 after."""
    for path in ('/list/competitions', '/list/competitions/'):
        assert client.get(path).status_code == 401
    login(client)
    for path in ('/list/competitions', '/list/competitions/'):
        assert client.get(path).status_code == 200
def test_list_competition(client):
    """Competition detail: 401 unauthenticated; after login, 200 for a real
    id and 404 for a nonexistent one."""
    real_url = '/list/competition/' + str(pytest.setup_vars['testtournament1_id'])
    bogus_url = '/list/competition/99999999'
    assert client.get(real_url).status_code == 401
    assert client.get(bogus_url).status_code == 401
    login(client)
    assert client.get(real_url).status_code == 200
    assert client.get(bogus_url).status_code == 404
def test_list_player(client):
    """Player detail: 401 unauthenticated; after login, 200 for a real id
    and 404 for a nonexistent one."""
    real_url = '/list/player/' + str(pytest.setup_vars['player1_id'])
    bogus_url = '/list/player/999999999'
    assert client.get(real_url).status_code == 401
    assert client.get(bogus_url).status_code == 401
    login(client)
    assert client.get(real_url).status_code == 200
    assert client.get(bogus_url).status_code == 404
def test_list_organisations(client):
    """Organisation list: 401 before login, 200 after."""
    assert client.get('/list/organisations').status_code == 401
    login(client)
    assert client.get('/list/organisations').status_code == 200
def test_list_organisation(client):
    """Organisation detail: 401 before login, 200 after."""
    url = '/list/organisation/' + str(pytest.setup_vars['organisation1_id'])
    assert client.get(url).status_code == 401
    login(client)
    assert client.get(url).status_code == 200
def test_list_event(client):
    """Event detail: 401 before login, 200 after."""
    url = '/list/event/' + str(pytest.setup_vars['testcompetition1_round1_id'])
    assert client.get(url).status_code == 401
    login(client)
    assert client.get(url).status_code == 200
| {
"content_hash": "7e466052e6e25f0fda2d2bde32b24ecc",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 91,
"avg_line_length": 26.69298245614035,
"alnum_prop": 0.6496878080841275,
"repo_name": "basbloemsaat/dartsense",
"id": "615b4eba27b5704169c04c1496ff559b88a7b4f6",
"size": "3067",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/webapp/test_app_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "8751"
},
{
"name": "CSS",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "10402"
},
{
"name": "JavaScript",
"bytes": "195855"
},
{
"name": "Perl",
"bytes": "16188"
},
{
"name": "Python",
"bytes": "109770"
},
{
"name": "Shell",
"bytes": "209"
},
{
"name": "TSQL",
"bytes": "13782"
}
],
"symlink_target": ""
} |
# Teaching example: use `assert` to sanity-check an assumption before
# relying on it. The assertion below is deliberately false for this
# 3-element list, so running the script raises AssertionError before the
# loop executes (the loop would otherwise raise IndexError at position 3).
mylist = [1, 2, 3] # this ideally comes from some place
assert 4 == len(mylist) # this will break: the list has only 3 elements
for position in range(4):
    # Never reached because the assertion above fails first.
    print(mylist[position])
| {
"content_hash": "ff929b5751ed44f655b72486035e50b9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 56,
"avg_line_length": 26,
"alnum_prop": 0.6730769230769231,
"repo_name": "mkhuthir/learnPython",
"id": "f8470a2b1a4c89181c6b2f2655c1bc3b871b9b5c",
"size": "156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Book_learning-python-r1.1/ch11/assertions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7706"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import uuid
import ldap as ldap
from keystone import assignment
from keystone import clean
from keystone.common import dependency
from keystone.common import driver_hints
from keystone.common import ldap as common_ldap
from keystone.common import models
from keystone import config
from keystone import exception
from keystone.identity.backends import ldap as ldap_identity
from keystone.openstack.common import log
CONF = config.CONF
LOG = log.getLogger(__name__)
@dependency.requires('identity_api')
class Assignment(assignment.Driver):
def __init__(self):
super(Assignment, self).__init__()
self.LDAP_URL = CONF.ldap.url
self.LDAP_USER = CONF.ldap.user
self.LDAP_PASSWORD = CONF.ldap.password
self.suffix = CONF.ldap.suffix
#These are the only deep dependency from assignment back
#to identity. The assumption is that if you are using
#LDAP for assignments, you are using it for Id as well.
self.user = ldap_identity.UserApi(CONF)
self.group = ldap_identity.GroupApi(CONF)
self.project = ProjectApi(CONF)
self.role = RoleApi(CONF)
def get_project(self, tenant_id):
return self._set_default_domain(self.project.get(tenant_id))
def list_projects(self, hints):
return self._set_default_domain(self.project.get_all())
def list_projects_in_domain(self, domain_id):
# We don't support multiple domains within this driver, so ignore
# any domain specified
return self.list_projects(driver_hints.Hints())
def get_project_by_name(self, tenant_name, domain_id):
self._validate_default_domain_id(domain_id)
return self._set_default_domain(self.project.get_by_name(tenant_name))
def create_project(self, tenant_id, tenant):
self.project.check_allow_create()
tenant = self._validate_default_domain(tenant)
tenant['name'] = clean.project_name(tenant['name'])
data = tenant.copy()
if 'id' not in data or data['id'] is None:
data['id'] = str(uuid.uuid4().hex)
if 'description' in data and data['description'] in ['', None]:
data.pop('description')
return self._set_default_domain(self.project.create(data))
def update_project(self, tenant_id, tenant):
self.project.check_allow_update()
tenant = self._validate_default_domain(tenant)
if 'name' in tenant:
tenant['name'] = clean.project_name(tenant['name'])
return self._set_default_domain(self.project.update(tenant_id, tenant))
def _get_metadata(self, user_id=None, tenant_id=None,
domain_id=None, group_id=None):
def _get_roles_for_just_user_and_project(user_id, tenant_id):
self.identity_api.get_user(user_id)
self.get_project(tenant_id)
return [self.role._dn_to_id(a.role_dn)
for a in self.role.get_role_assignments
(self.project._id_to_dn(tenant_id))
if self.user._dn_to_id(a.user_dn) == user_id]
def _get_roles_for_group_and_project(group_id, project_id):
self.identity_api.get_group(group_id)
self.get_project(project_id)
group_dn = self.group._id_to_dn(group_id)
# NOTE(marcos-fermin-lobo): In Active Directory, for functions
# such as "self.role.get_role_assignments", it returns
# the key "CN" or "OU" in uppercase.
# The group_dn var has "CN" and "OU" in lowercase.
# For this reason, it is necessary to use the "upper()"
# function so both are consistent.
return [self.role._dn_to_id(a.role_dn)
for a in self.role.get_role_assignments
(self.project._id_to_dn(project_id))
if a.user_dn.upper() == group_dn.upper()]
if domain_id is not None:
msg = 'Domain metadata not supported by LDAP'
raise exception.NotImplemented(message=msg)
if group_id is None and user_id is None:
return {}
if tenant_id is None:
return {}
if user_id is None:
metadata_ref = _get_roles_for_group_and_project(group_id,
tenant_id)
else:
metadata_ref = _get_roles_for_just_user_and_project(user_id,
tenant_id)
if not metadata_ref:
return {}
return {'roles': [self._role_to_dict(r, False) for r in metadata_ref]}
def get_role(self, role_id):
return self.role.get(role_id)
def list_roles(self, hints):
return self.role.get_all()
def list_projects_for_user(self, user_id, group_ids, hints):
# NOTE(henry-nash): The LDAP backend is being deprecated, so no
# support is provided for projects that the user has a role on solely
# by virtue of group membership.
self.identity_api.get_user(user_id)
user_dn = self.user._id_to_dn(user_id)
associations = (self.role.list_project_roles_for_user
(user_dn, self.project.tree_dn))
# Since the LDAP backend doesn't store the domain_id in the LDAP
# records (and only supports the default domain), we fill in the
# domain_id before we return the list.
return [self._set_default_domain(x) for x in
self.project.get_user_projects(user_dn, associations)]
def list_user_ids_for_project(self, tenant_id):
self.get_project(tenant_id)
tenant_dn = self.project._id_to_dn(tenant_id)
rolegrants = self.role.get_role_assignments(tenant_dn)
return [self.user._dn_to_id(user_dn) for user_dn in
self.project.get_user_dns(tenant_id, rolegrants)]
def _subrole_id_to_dn(self, role_id, tenant_id):
if tenant_id is None:
return self.role._id_to_dn(role_id)
else:
return '%s=%s,%s' % (self.role.id_attr,
ldap.dn.escape_dn_chars(role_id),
self.project._id_to_dn(tenant_id))
def add_role_to_user_and_project(self, user_id, tenant_id, role_id):
self.identity_api.get_user(user_id)
self.get_project(tenant_id)
self.get_role(role_id)
user_dn = self.user._id_to_dn(user_id)
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
self.role.add_user(role_id, role_dn, user_dn, user_id, tenant_id)
tenant_dn = self.project._id_to_dn(tenant_id)
return UserRoleAssociation(role_dn=role_dn,
user_dn=user_dn,
tenant_dn=tenant_dn)
def _add_role_to_group_and_project(self, group_id, tenant_id, role_id):
self.identity_api.get_group(group_id)
self.get_project(tenant_id)
self.get_role(role_id)
group_dn = self.group._id_to_dn(group_id)
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
self.role.add_user(role_id, role_dn, group_dn, group_id, tenant_id)
tenant_dn = self.project._id_to_dn(tenant_id)
return GroupRoleAssociation(group_dn=group_dn,
role_dn=role_dn,
tenant_dn=tenant_dn)
def _create_metadata(self, user_id, tenant_id, metadata):
return {}
def create_role(self, role_id, role):
self.role.check_allow_create()
try:
self.get_role(role_id)
except exception.NotFound:
pass
else:
msg = 'Duplicate ID, %s.' % role_id
raise exception.Conflict(type='role', details=msg)
try:
self.role.get_by_name(role['name'])
except exception.NotFound:
pass
else:
msg = 'Duplicate name, %s.' % role['name']
raise exception.Conflict(type='role', details=msg)
return self.role.create(role)
def delete_role(self, role_id):
self.role.check_allow_delete()
return self.role.delete(role_id, self.project.tree_dn)
def delete_project(self, tenant_id):
self.project.check_allow_delete()
if self.project.subtree_delete_enabled:
self.project.deleteTree(tenant_id)
else:
tenant_dn = self.project._id_to_dn(tenant_id)
self.role.roles_delete_subtree_by_project(tenant_dn)
self.project.delete(tenant_id)
def remove_role_from_user_and_project(self, user_id, tenant_id, role_id):
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
return self.role.delete_user(role_dn,
self.user._id_to_dn(user_id),
self.project._id_to_dn(tenant_id),
user_id, role_id)
def _remove_role_from_group_and_project(self, group_id, tenant_id,
role_id):
role_dn = self._subrole_id_to_dn(role_id, tenant_id)
return self.role.delete_user(role_dn,
self.group._id_to_dn(group_id),
self.project._id_to_dn(tenant_id),
group_id, role_id)
def update_role(self, role_id, role):
self.role.check_allow_update()
self.get_role(role_id)
return self.role.update(role_id, role)
def create_domain(self, domain_id, domain):
if domain_id == CONF.identity.default_domain_id:
msg = 'Duplicate ID, %s.' % domain_id
raise exception.Conflict(type='domain', details=msg)
raise exception.Forbidden('Domains are read-only against LDAP')
def get_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
return assignment.calc_default_domain()
def update_domain(self, domain_id, domain):
self._validate_default_domain_id(domain_id)
raise exception.Forbidden('Domains are read-only against LDAP')
def delete_domain(self, domain_id):
self._validate_default_domain_id(domain_id)
raise exception.Forbidden('Domains are read-only against LDAP')
def list_domains(self, hints):
return [assignment.calc_default_domain()]
#Bulk actions on User From identity
def delete_user(self, user_id):
user_dn = self.user._id_to_dn(user_id)
for ref in self.role.list_global_roles_for_user(user_dn):
self.role.delete_user(ref.role_dn, ref.user_dn, ref.project_dn,
user_id, self.role._dn_to_id(ref.role_dn))
for ref in self.role.list_project_roles_for_user(user_dn,
self.project.tree_dn):
self.role.delete_user(ref.role_dn, ref.user_dn, ref.project_dn,
user_id, self.role._dn_to_id(ref.role_dn))
user = self.user.get(user_id)
if hasattr(user, 'tenant_id'):
self.project.remove_user(user.tenant_id,
self.user._id_to_dn(user_id))
#LDAP assignments only supports LDAP identity. Assignments under identity
#are already deleted
def delete_group(self, group_id):
if not self.group.subtree_delete_enabled:
# TODO(spzala): this is only placeholder for group and domain
# role support which will be added under bug 1101287
query = '(objectClass=%s)' % self.group.object_class
dn = None
dn = self.group._id_to_dn(group_id)
if dn:
try:
conn = self.group.get_connection()
roles = conn.search_s(dn, ldap.SCOPE_ONELEVEL,
query, ['%s' % '1.1'])
for role_dn, _ in roles:
conn.delete_s(role_dn)
except ldap.NO_SUCH_OBJECT:
pass
finally:
conn.unbind_s()
def create_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
self.get_role(role_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
if project_id and inherited_to_projects:
msg = _('Inherited roles can only be assigned to domains')
raise exception.Conflict(type='role grant', details=msg)
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
if user_id is None:
metadata_ref['roles'] = self._add_role_to_group_and_project(
group_id, project_id, role_id)
else:
metadata_ref['roles'] = self.add_role_to_user_and_project(
user_id, project_id, role_id)
def get_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
role_ref = self.get_role(role_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
role_ids = set(self._roles_from_role_dicts(
metadata_ref.get('roles', []), inherited_to_projects))
if role_id not in role_ids:
raise exception.RoleNotFound(role_id=role_id)
return role_ref
def delete_grant(self, role_id, user_id=None, group_id=None,
domain_id=None, project_id=None,
inherited_to_projects=False):
if user_id:
self.identity_api.get_user(user_id)
if group_id:
self.identity_api.get_group(group_id)
self.get_role(role_id)
if domain_id:
self.get_domain(domain_id)
if project_id:
self.get_project(project_id)
try:
metadata_ref = self._get_metadata(user_id, project_id,
domain_id, group_id)
except exception.MetadataNotFound:
metadata_ref = {}
try:
if user_id is None:
metadata_ref['roles'] = (
self._remove_role_from_group_and_project(
group_id, project_id, role_id))
else:
metadata_ref['roles'] = self.remove_role_from_user_and_project(
user_id, project_id, role_id)
except KeyError:
raise exception.RoleNotFound(role_id=role_id)
    def list_grants(self, user_id=None, group_id=None,
                    domain_id=None, project_id=None,
                    inherited_to_projects=False):
        """Return the role refs granted to a user/group on a scope.

        Missing assignment metadata yields an empty list rather than an
        error.
        """
        if domain_id:
            self.get_domain(domain_id)
        if project_id:
            self.get_project(project_id)
        try:
            metadata_ref = self._get_metadata(user_id, project_id,
                                              domain_id, group_id)
        except exception.MetadataNotFound:
            metadata_ref = {}
        # Resolve each stored role id into a full role ref.
        return [self.get_role(x) for x in
                self._roles_from_role_dicts(metadata_ref.get('roles', []),
                                            inherited_to_projects)]
    def get_domain_by_name(self, domain_name):
        """Look up a domain by name; not supported by this backend."""
        raise exception.NotImplemented()
def list_role_assignments(self):
role_assignments = []
for a in self.role.list_role_assignments(self.project.tree_dn):
assignment = {'role_id': self.role._dn_to_id(a.role_dn),
'user_id': self.user._dn_to_id(a.user_dn),
'project_id': self.project._dn_to_id(a.project_dn)}
role_assignments.append(assignment)
return role_assignments
# TODO(termie): turn this into a data object and move logic to driver
class ProjectApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap):
    """LDAP mapping for project (a.k.a. tenant) entries.

    Projects are stored as groupOfNames entries; project membership is
    recorded in the configured member attribute.
    """
    DEFAULT_OU = 'ou=Groups'
    DEFAULT_STRUCTURAL_CLASSES = []
    DEFAULT_OBJECTCLASS = 'groupOfNames'
    DEFAULT_ID_ATTR = 'cn'
    DEFAULT_MEMBER_ATTRIBUTE = 'member'
    NotFound = exception.ProjectNotFound
    notfound_arg = 'project_id' # NOTE(yorik-sar): while options_name = tenant
    options_name = 'tenant'
    # Mapping of API attribute names to LDAP attribute option names.
    attribute_options_names = {'name': 'name',
                               'description': 'desc',
                               'enabled': 'enabled',
                               'domain_id': 'domain_id'}
    immutable_attrs = ['name']
    model = models.Project
    def __init__(self, conf):
        """Initialize from config, resolving the member attribute name."""
        super(ProjectApi, self).__init__(conf)
        self.member_attribute = (getattr(conf.ldap, 'tenant_member_attribute')
                                 or self.DEFAULT_MEMBER_ATTRIBUTE)
    def create(self, values):
        """Create a project, generating a uuid hex id when none is given."""
        data = values.copy()
        if data.get('id') is None:
            data['id'] = uuid.uuid4().hex
        return super(ProjectApi, self).create(data)
    def get_user_projects(self, user_dn, associations):
        """Returns list of tenants a user has access to
        """
        # Deduplicate project ids derived from the association DNs.
        project_ids = set()
        for assoc in associations:
            project_ids.add(self._dn_to_id(assoc.project_dn))
        projects = []
        for project_id in project_ids:
            #slower to get them one at a time, but a huge list could blow out
            #the connection. This is the safer way
            projects.append(self.get(project_id))
        return projects
    def add_user(self, tenant_id, user_dn):
        """Add user_dn to the tenant's member attribute (idempotent)."""
        conn = self.get_connection()
        try:
            conn.modify_s(
                self._id_to_dn(tenant_id),
                [(ldap.MOD_ADD,
                  self.member_attribute,
                  user_dn)])
        except ldap.TYPE_OR_VALUE_EXISTS:
            # As adding a user to a tenant is done implicitly in several
            # places, and is not part of the exposed API, it's easier for us to
            # just ignore this instead of raising exception.Conflict.
            pass
        finally:
            conn.unbind_s()
    def remove_user(self, tenant_id, user_dn, user_id):
        """Remove user_dn from the tenant; NotFound if not a member."""
        conn = self.get_connection()
        try:
            conn.modify_s(self._id_to_dn(tenant_id),
                          [(ldap.MOD_DELETE,
                            self.member_attribute,
                            user_dn)])
        except ldap.NO_SUCH_ATTRIBUTE:
            # The user was not in the member list.
            raise exception.NotFound(user_id)
        finally:
            conn.unbind_s()
    def get_user_dns(self, tenant_id, rolegrants, role_dn=None):
        """Collect the DNs of users associated with the tenant.

        Without role_dn, direct members are included; rolegrants add users
        mapped through role assignments (optionally filtered by role_dn).
        """
        tenant = self._ldap_get(tenant_id)
        res = set()
        if not role_dn:
            # Get users who have default tenant mapping
            for user_dn in tenant[1].get(self.member_attribute, []):
                if self.use_dumb_member and user_dn == self.dumb_member:
                    continue
                res.add(user_dn)
        # Get users who are explicitly mapped via a tenant
        for rolegrant in rolegrants:
            if role_dn is None or rolegrant.role_dn == role_dn:
                res.add(rolegrant.user_dn)
        return list(res)
    def update(self, project_id, values):
        """Update a project, passing the old object so the base can diff."""
        old_obj = self.get(project_id)
        return super(ProjectApi, self).update(project_id, values, old_obj)
class UserRoleAssociation(object):
    """Role grant linking a user DN to a role DN within a project."""
    def __init__(self, user_dn=None, role_dn=None, tenant_dn=None,
                 *args, **kw):
        # The tenant_dn keyword is exposed as project_dn, reflecting the
        # tenant -> project naming transition.
        self.user_dn, self.role_dn, self.project_dn = (
            user_dn, role_dn, tenant_dn)
class GroupRoleAssociation(object):
    """Role grant linking a group DN to a role DN within a project."""
    def __init__(self, group_dn=None, role_dn=None, tenant_dn=None,
                 *args, **kw):
        # The tenant_dn keyword is exposed as project_dn, reflecting the
        # tenant -> project naming transition.
        self.group_dn, self.role_dn, self.project_dn = (
            group_dn, role_dn, tenant_dn)
# TODO(termie): turn this into a data object and move logic to driver
class RoleApi(common_ldap.BaseLdap):
    """LDAP mapping for role entries.

    Roles are organizationalRole entries; a role assignment is modeled by
    adding a user DN to the role entry's member attribute, with the role
    entry placed under the tenant's DN.
    """
    DEFAULT_OU = 'ou=Roles'
    DEFAULT_STRUCTURAL_CLASSES = []
    DEFAULT_OBJECTCLASS = 'organizationalRole'
    DEFAULT_MEMBER_ATTRIBUTE = 'roleOccupant'
    NotFound = exception.RoleNotFound
    options_name = 'role'
    attribute_options_names = {'name': 'name'}
    immutable_attrs = ['id']
    model = models.Role
    def __init__(self, conf):
        """Initialize from config, resolving the member attribute name."""
        super(RoleApi, self).__init__(conf)
        self.member_attribute = (getattr(conf.ldap, 'role_member_attribute')
                                 or self.DEFAULT_MEMBER_ATTRIBUTE)
    def get(self, role_id, role_filter=None):
        """Fetch a role by id (delegates entirely to the base class)."""
        model = super(RoleApi, self).get(role_id, role_filter)
        return model
    def create(self, values):
        """Create a role entry (delegates entirely to the base class)."""
        return super(RoleApi, self).create(values)
    def add_user(self, role_id, role_dn, user_dn, user_id, tenant_id=None):
        """Grant a role to a user by adding user_dn to the role entry.

        If the role entry does not yet exist under the tenant, it is
        created on the fly with the user as its first member.
        """
        conn = self.get_connection()
        try:
            conn.modify_s(role_dn, [(ldap.MOD_ADD,
                                     self.member_attribute, user_dn)])
        except ldap.TYPE_OR_VALUE_EXISTS:
            # The user already occupies this role on this tenant.
            msg = ('User %s already has role %s in tenant %s'
                   % (user_id, role_id, tenant_id))
            raise exception.Conflict(type='role grant', details=msg)
        except ldap.NO_SUCH_OBJECT:
            # Role entry missing: only create it when the role definition
            # itself exists and a tenant scope was given.
            if tenant_id is None or self.get(role_id) is None:
                raise Exception(_("Role %s not found") % (role_id,))
            attrs = [('objectClass', [self.object_class]),
                     (self.member_attribute, [user_dn])]
            if self.use_dumb_member:
                attrs[1][1].append(self.dumb_member)
            # NOTE(review): this try/except re-raises unchanged and is a
            # no-op; it could be removed without changing behavior.
            try:
                conn.add_s(role_dn, attrs)
            except Exception as inst:
                raise inst
        finally:
            conn.unbind_s()
    def delete_user(self, role_dn, user_dn, tenant_dn,
                    user_id, role_id):
        """Revoke a role by removing user_dn from the role entry."""
        conn = self.get_connection()
        try:
            conn.modify_s(role_dn, [(ldap.MOD_DELETE,
                                     self.member_attribute, user_dn)])
        except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE):
            # Either the role entry or the membership was never there.
            raise exception.RoleNotFound(message=_(
                'Cannot remove role that has not been granted, %s') %
                role_id)
        finally:
            conn.unbind_s()
    def get_role_assignments(self, tenant_dn):
        """Return UserRoleAssociation objects for every role entry found
        directly under tenant_dn.
        """
        conn = self.get_connection()
        query = '(objectClass=%s)' % self.object_class
        try:
            roles = conn.search_s(tenant_dn, ldap.SCOPE_ONELEVEL, query)
        except ldap.NO_SUCH_OBJECT:
            # Tenant without a roles subtree simply has no assignments.
            return []
        finally:
            conn.unbind_s()
        res = []
        for role_dn, attrs in roles:
            try:
                user_dns = attrs[self.member_attribute]
            except KeyError:
                # Role entry with no members at all.
                continue
            for user_dn in user_dns:
                if self.use_dumb_member and user_dn == self.dumb_member:
                    continue
                res.append(UserRoleAssociation(
                    user_dn=user_dn,
                    role_dn=role_dn,
                    tenant_dn=tenant_dn))
        return res
    def list_global_roles_for_user(self, user_dn):
        """Return associations for roles the user holds outside any tenant."""
        roles = self.get_all('(%s=%s)' % (self.member_attribute, user_dn))
        return [UserRoleAssociation(
            role_dn=role.dn,
            user_dn=user_dn) for role in roles]
    def list_project_roles_for_user(self, user_dn, project_subtree):
        """Return associations for roles the user holds anywhere under the
        given project subtree.
        """
        conn = self.get_connection()
        query = '(&(objectClass=%s)(%s=%s))' % (self.object_class,
                                                self.member_attribute,
                                                user_dn)
        try:
            roles = conn.search_s(project_subtree,
                                  ldap.SCOPE_SUBTREE,
                                  query)
        except ldap.NO_SUCH_OBJECT:
            return []
        finally:
            conn.unbind_s()
        res = []
        for role_dn, _ in roles:
            #ldap.dn.dn2str returns an array, where the first
            #element is the first segment.
            #For a role assignment, this contains the role ID,
            #The remainder is the DN of the tenant.
            tenant = ldap.dn.str2dn(role_dn)
            tenant.pop(0)
            tenant_dn = ldap.dn.dn2str(tenant)
            res.append(UserRoleAssociation(
                user_dn=user_dn,
                role_dn=role_dn,
                tenant_dn=tenant_dn))
        return res
    def roles_delete_subtree_by_project(self, tenant_dn):
        """Delete every role entry directly under tenant_dn."""
        conn = self.get_connection()
        query = '(objectClass=%s)' % self.object_class
        try:
            roles = conn.search_s(tenant_dn, ldap.SCOPE_ONELEVEL, query)
            for role_dn, _ in roles:
                # NOTE(review): this inner try/except re-raises unchanged
                # and is a no-op; it could be removed without changing
                # behavior.
                try:
                    conn.delete_s(role_dn)
                except Exception as inst:
                    raise inst
        except ldap.NO_SUCH_OBJECT:
            pass
        finally:
            conn.unbind_s()
    def update(self, role_id, role):
        """Rename/update a role, refusing duplicate role names.

        NOTE(review): the duplicate-name check fires whenever any role with
        the requested name exists, including the role being updated itself
        -- confirm this is the intended semantics.
        """
        try:
            old_name = self.get_by_name(role['name'])
            raise exception.Conflict('Cannot duplicate name %s' % old_name)
        except exception.NotFound:
            pass
        return super(RoleApi, self).update(role_id, role)
    def delete(self, role_id, tenant_dn):
        """Delete a role definition and all its per-tenant role entries."""
        conn = self.get_connection()
        query = '(&(objectClass=%s)(%s=%s))' % (self.object_class,
                                                self.id_attr, role_id)
        try:
            # Remove every assignment entry for this role in the subtree.
            for role_dn, _ in conn.search_s(tenant_dn,
                                            ldap.SCOPE_SUBTREE,
                                            query):
                conn.delete_s(role_dn)
        except ldap.NO_SUCH_OBJECT:
            pass
        finally:
            conn.unbind_s()
        # Finally remove the role definition itself.
        super(RoleApi, self).delete(role_id)
    def list_role_assignments(self, project_tree_dn):
        """Returns a list of all the role assignments linked to project_tree_dn
        attribute.
        """
        conn = self.get_connection()
        query = '(objectClass=%s)' % (self.object_class)
        try:
            roles = conn.search_s(project_tree_dn,
                                  ldap.SCOPE_SUBTREE,
                                  query)
        except ldap.NO_SUCH_OBJECT:
            return []
        finally:
            conn.unbind_s()
        res = []
        for role_dn, role in roles:
            tenant = ldap.dn.str2dn(role_dn)
            tenant.pop(0)
            # It obtains the tenant DN to construct the UserRoleAssociation
            # object.
            tenant_dn = ldap.dn.dn2str(tenant)
            for user_dn in role[self.member_attribute]:
                res.append(UserRoleAssociation(
                    user_dn=user_dn,
                    role_dn=role_dn,
                    tenant_dn=tenant_dn))
        return res
| {
"content_hash": "47edb2b369ff65ca52167bc457c4db8b",
"timestamp": "",
"source": "github",
"line_count": 702,
"max_line_length": 79,
"avg_line_length": 39.027065527065524,
"alnum_prop": 0.5477607037266854,
"repo_name": "derekchiang/keystone",
"id": "e3023886e08dbdf9f5eda0b53ab64905f979ebe8",
"size": "28032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/assignment/backends/ldap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2833790"
},
{
"name": "Shell",
"bytes": "10512"
}
],
"symlink_target": ""
} |
# Packaging script for the 'encoded' metadata database application.
import os
import sys
from setuptools import setup, find_packages
# Long-description inputs are read relative to this file.
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
CHANGES = open(os.path.join(here, 'CHANGES.rst')).read()
# Runtime dependencies.
requires = [
    'snovault',
    'Pillow',
    'PyBrowserID',
    'SQLAlchemy>=1.0.0b1',
    'WSGIProxy2',
    'WebTest',
    'boto',
    'botocore',
    'jmespath',
    'boto3',
    'elasticsearch',
    'lucenequery',
    'future',
    'humanfriendly',
    'jsonschema_serialize_fork',
    'loremipsum',
    'netaddr',
    'passlib',
    'psutil',
    'pyramid',
    'pyramid_localroles',
    'pyramid_multiauth',
    'pyramid_tm',
    'python-magic',
    'pytz',
    'rdflib',
    'rdflib-jsonld',
    'rfc3987',
    'setuptools',
    'simplejson',
    'strict_rfc3339',
    'subprocess_middleware',
    'xlrd',
    'zope.sqlalchemy',
]
# Python 2 needs backports of functools.lru_cache and subprocess timeouts.
if sys.version_info.major == 2:
    requires.extend([
        'backports.functools_lru_cache',
        'subprocess32',
    ])
# Extra dependencies needed only to run the test suite.
tests_require = [
    'pytest>=2.4.0',
    'pytest-bdd',
    'pytest-mock',
    'pytest-splinter',
    'pytest_exact_fixtures',
]
setup(
    name='encoded',
    version='66.0',
    description='Metadata database for ENCODE',
    long_description=README + '\n\n' + CHANGES,
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    author='Laurence Rowe',
    author_email='lrowe@stanford.edu',
    url='http://encode-dcc.org',
    license='MIT',
    install_requires=requires,
    tests_require=tests_require,
    extras_require={
        'test': tests_require,
    },
    # Console scripts plus the PasteDeploy factories used by the WSGI
    # deployment configuration.
    entry_points='''
        [console_scripts]
        batchupgrade = snovault.batchupgrade:main
        create-mapping = snovault.elasticsearch.create_mapping:main
        dev-servers = snovault.dev_servers:main
        es-index-listener = snovault.elasticsearch.es_index_listener:main

        add-date-created = encoded.commands.add_date_created:main
        check-rendering = encoded.commands.check_rendering:main
        deploy = encoded.commands.deploy:main
        extract_test_data = encoded.commands.extract_test_data:main
        es-index-data = encoded.commands.es_index_data:main
        generate-ontology = encoded.commands.generate_ontology:main
        import-data = encoded.commands.import_data:main
        jsonld-rdf = encoded.commands.jsonld_rdf:main
        migrate-files-aws = encoded.commands.migrate_files_aws:main
        profile = encoded.commands.profile:main
        spreadsheet-to-json = encoded.commands.spreadsheet_to_json:main
        generate-annotations = encoded.commands.generate_annotations:main
        index-annotations = encoded.commands.index_annotations:main
        migrate-attachments-aws = encoded.commands.migrate_attachments_aws:main
        migrate-dataset-type = encoded.commands.migrate_dataset_type:main

        [paste.app_factory]
        main = encoded:main

        [paste.composite_factory]
        indexer = snovault.elasticsearch.es_index_listener:composite
        visindexer = snovault.elasticsearch.es_index_listener:composite

        [paste.filter_app_factory]
        memlimit = encoded.memlimit:filter_app
        ''',
)
| {
"content_hash": "607892a943cec8a3e1f6d93cfa2a8413",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 28.92792792792793,
"alnum_prop": 0.6474618498909996,
"repo_name": "T2DREAM/t2dream-portal",
"id": "2d9fcd89f8c25e8dc75d0e0e51e42d950b0786e3",
"size": "3211",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Gherkin",
"bytes": "16776"
},
{
"name": "HTML",
"bytes": "373076"
},
{
"name": "JavaScript",
"bytes": "1320205"
},
{
"name": "Makefile",
"bytes": "106"
},
{
"name": "Python",
"bytes": "1567328"
},
{
"name": "SCSS",
"bytes": "336182"
},
{
"name": "Shell",
"bytes": "4199"
}
],
"symlink_target": ""
} |
"""Config flow for Yeelight integration."""
import logging
import voluptuous as vol
import yeelight
from homeassistant import config_entries, exceptions
from homeassistant.const import CONF_DEVICE, CONF_HOST, CONF_ID, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import (
CONF_MODE_MUSIC,
CONF_MODEL,
CONF_NIGHTLIGHT_SWITCH,
CONF_NIGHTLIGHT_SWITCH_TYPE,
CONF_SAVE_ON_CHANGE,
CONF_TRANSITION,
DOMAIN,
NIGHTLIGHT_SWITCH_TYPE_LIGHT,
_async_unique_name,
)
_LOGGER = logging.getLogger(__name__)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Yeelight."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Return the options flow."""
        return OptionsFlowHandler(config_entry)
    def __init__(self):
        """Initialize the config flow."""
        # unique_id -> capabilities dict, populated during discovery.
        self._discovered_devices = {}
    async def async_step_user(self, user_input=None):
        """Handle the initial step.

        A submitted host is validated and configured directly; an empty
        host falls through to network discovery.
        """
        errors = {}
        if user_input is not None:
            if user_input.get(CONF_HOST):
                try:
                    await self._async_try_connect(user_input[CONF_HOST])
                    return self.async_create_entry(
                        title=user_input[CONF_HOST],
                        data=user_input,
                    )
                except CannotConnect:
                    # Re-show the form with an error message.
                    errors["base"] = "cannot_connect"
                except AlreadyConfigured:
                    return self.async_abort(reason="already_configured")
            else:
                # No host given: offer discovered devices instead.
                return await self.async_step_pick_device()
        user_input = user_input or {}
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {vol.Optional(CONF_HOST, default=user_input.get(CONF_HOST, "")): str}
            ),
            errors=errors,
        )
    async def async_step_pick_device(self, user_input=None):
        """Handle the step to pick discovered device."""
        if user_input is not None:
            unique_id = user_input[CONF_DEVICE]
            capabilities = self._discovered_devices[unique_id]
            await self.async_set_unique_id(unique_id)
            self._abort_if_unique_id_configured()
            return self.async_create_entry(
                title=_async_unique_name(capabilities),
                data={CONF_ID: unique_id},
            )
        # ids of devices that already have a config entry.
        configured_devices = {
            entry.data[CONF_ID]
            for entry in self._async_current_entries()
            if entry.data[CONF_ID]
        }
        devices_name = {}
        # Run 3 times as packets can get lost
        for _ in range(3):
            devices = await self.hass.async_add_executor_job(yeelight.discover_bulbs)
            for device in devices:
                capabilities = device["capabilities"]
                unique_id = capabilities["id"]
                if unique_id in configured_devices:
                    continue # ignore configured devices
                model = capabilities["model"]
                host = device["ip"]
                name = f"{host} {model} {unique_id}"
                self._discovered_devices[unique_id] = capabilities
                devices_name[unique_id] = name
        # Check if there is at least one device
        if not devices_name:
            return self.async_abort(reason="no_devices_found")
        return self.async_show_form(
            step_id="pick_device",
            data_schema=vol.Schema({vol.Required(CONF_DEVICE): vol.In(devices_name)}),
        )
    async def async_step_import(self, user_input=None):
        """Handle import step.

        Used when migrating a YAML-configured bulb to a config entry; the
        legacy nightlight switch-type option is translated to a boolean.
        """
        host = user_input[CONF_HOST]
        try:
            await self._async_try_connect(host)
        except CannotConnect:
            _LOGGER.error("Failed to import %s: cannot connect", host)
            return self.async_abort(reason="cannot_connect")
        except AlreadyConfigured:
            return self.async_abort(reason="already_configured")
        if CONF_NIGHTLIGHT_SWITCH_TYPE in user_input:
            user_input[CONF_NIGHTLIGHT_SWITCH] = (
                user_input.pop(CONF_NIGHTLIGHT_SWITCH_TYPE)
                == NIGHTLIGHT_SWITCH_TYPE_LIGHT
            )
        return self.async_create_entry(title=user_input[CONF_NAME], data=user_input)
    async def _async_try_connect(self, host):
        """Probe the bulb at *host*.

        Raises AlreadyConfigured if an entry for the host exists and
        CannotConnect if the bulb cannot be reached at all.
        """
        for entry in self._async_current_entries():
            if entry.data.get(CONF_HOST) == host:
                raise AlreadyConfigured
        bulb = yeelight.Bulb(host)
        try:
            capabilities = await self.hass.async_add_executor_job(bulb.get_capabilities)
            if capabilities is None: # timeout
                _LOGGER.debug("Failed to get capabilities from %s: timeout", host)
            else:
                _LOGGER.debug("Get capabilities: %s", capabilities)
                await self.async_set_unique_id(capabilities["id"])
                self._abort_if_unique_id_configured()
                return
        except OSError as err:
            _LOGGER.debug("Failed to get capabilities from %s: %s", host, err)
        # Ignore the error since get_capabilities uses UDP discovery packet
        # which does not work in all network environments
        # Fallback to get properties
        try:
            await self.hass.async_add_executor_job(bulb.get_properties)
        except yeelight.BulbException as err:
            _LOGGER.error("Failed to get properties from %s: %s", host, err)
            raise CannotConnect from err
        _LOGGER.debug("Get properties: %s", bulb.last_properties)
class OptionsFlowHandler(config_entries.OptionsFlow):
    """Options flow that edits the stored options of a Yeelight entry."""
    def __init__(self, config_entry):
        """Remember the config entry whose options are being edited."""
        self._config_entry = config_entry
    async def async_step_init(self, user_input=None):
        """Show the options form, or persist the submitted options."""
        current = self._config_entry.options
        if user_input is not None:
            # Overlay the submitted values on the existing options.
            return self.async_create_entry(
                title="", data={**current, **user_input})
        schema = {
            vol.Optional(CONF_MODEL, default=current[CONF_MODEL]): str,
            vol.Required(
                CONF_TRANSITION,
                default=current[CONF_TRANSITION],
            ): cv.positive_int,
            vol.Required(
                CONF_MODE_MUSIC, default=current[CONF_MODE_MUSIC]
            ): bool,
            vol.Required(
                CONF_SAVE_ON_CHANGE,
                default=current[CONF_SAVE_ON_CHANGE],
            ): bool,
            vol.Required(
                CONF_NIGHTLIGHT_SWITCH,
                default=current[CONF_NIGHTLIGHT_SWITCH],
            ): bool,
        }
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(schema))
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect to the bulb at the given host."""
class AlreadyConfigured(exceptions.HomeAssistantError):
    """Indicate the ip address is already configured as a config entry."""
| {
"content_hash": "9df7d308d8568eb95bb734cb5d3c2c67",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 88,
"avg_line_length": 37.163366336633665,
"alnum_prop": 0.5688024510456907,
"repo_name": "adrienbrault/home-assistant",
"id": "0473cc1042c077196f94e7a3e2153318c0c275ff",
"size": "7507",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/yeelight/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
# Box-plot example built from synthetic per-category scores.
# NOTE(review): this script uses the legacy implicit-figure bokeh API
# (module-level figure()/hold()/rect()), which modern bokeh has removed.
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Generate some synthetic time series for six different categories
cats = list("abcdef")
y = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
    y[g == l] += i // 2
df = pd.DataFrame(dict(score=y, group=g))
# Find the quartiles and IQR for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
# NOTE(review): fences are centered on the median (q2); conventional Tukey
# fences use q3 + 1.5*IQR / q1 - 1.5*IQR -- confirm this is intentional.
upper = q2 + 1.5*iqr
lower = q2 - 1.5*iqr
# find the outliers for each category
def outliers(group):
    """Return the scores in *group* that fall outside the fences."""
    cat = group.name
    return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting, we need coordinate for every outlier.
outx = []
outy = []
for cat in cats:
    # only add outliers if they exist
    if not out.loc[cat].empty:
        for value in out[cat]:
            outx.append(cat)
            outy.append(value)
output_file("boxplot.html")
figure(tools="previewsave", background_fill="#EFE8E2", title="")
hold()
# If no outliers, shrink lengths of stems to be no longer than the minimums or maximums
qmin = groups.quantile(q=0.00)
qmax = groups.quantile(q=1.00)
upper.score = [min([x,y]) for (x,y) in zip(list(qmax.iloc[:,0]),upper.score) ]
lower.score = [max([x,y]) for (x,y) in zip(list(qmin.iloc[:,0]),lower.score) ]
# stems
segment(cats, upper.score, cats, q3.score, x_range=cats,
        line_width=2, line_color="black", )
segment(cats, lower.score, cats, q1.score, x_range=cats,
        line_width=2, line_color="black")
# boxes
rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
    fill_color="#E08E79", line_width=2, line_color="black")
rect(cats, (q2.score+q1.score)/2, 0.7, q2.score-q1.score,
    fill_color="#3B8686", line_width=2, line_color="black")
# whiskers (0-height rects simpler than segments)
rect(cats, lower.score, 0.2, 0, line_color="black")
rect(cats, upper.score, 0.2, 0, line_color="black")
# outliers
circle(outx, outy, size=6, color="#F38630", fill_alpha=0.6)
xgrid().grid_line_color = None
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
xaxis().major_label_text_font_size="12pt"
show()
| {
"content_hash": "c8baa5ea7474a7b1fbc80e7af9fc28dc",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 95,
"avg_line_length": 30.931506849315067,
"alnum_prop": 0.6722763507528786,
"repo_name": "jakevdp/bokeh",
"id": "736b411ae2a1e9d73a5783bec1e66c291eec104b",
"size": "2258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plotting/file/boxplot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
# Train a small feed-forward neural network to classify crime records
# (THEFT vs BATTERY) from two location features.
# NOTE(review): output_dim/init/nb_epoch are Keras 1 keyword names; newer
# Keras versions use units/kernel_initializer/epochs instead.
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('crimes2016OnlyLocationsTHEFTandBATTERY.csv')
# Columns 1-2 are the features; column 0 is the label.
X = dataset.iloc[:, 1:3].values
y = dataset.iloc[:, 0].values
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Part 2 - Now let's make the ANN!
# Importing the Keras libraries and packages
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialising the ANN
classifier = Sequential()
# Adding the input layer and the first hidden layer
classifier.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu', input_dim = 2))
# Adding the second hidden layer
classifier.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu'))
classifier.add(Dense(output_dim = 3, init = 'uniform', activation = 'relu'))
# Adding the output layer
classifier.add(Dense(output_dim = 1, init = 'uniform', activation = 'sigmoid'))
# Compiling the ANN
classifier.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
# Fitting the ANN to the Training set
classifier.fit(X_train, y_train, batch_size = 5, nb_epoch = 100)
# Part 3 - Making the predictions and evaluating the model
# Predicting the Test set results; threshold the sigmoid output at 0.5.
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
"content_hash": "c04398f5e44237c7b1ecb1dede54322a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 92,
"avg_line_length": 30.509090909090908,
"alnum_prop": 0.7377830750893921,
"repo_name": "jeanlks/ARP",
"id": "42fa6e60cbdea40c5130e3268d49c4a583fc7085",
"size": "1679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "finalProject/NeuralNetwork.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "17234"
},
{
"name": "PostScript",
"bytes": "2720432"
},
{
"name": "Python",
"bytes": "52742"
},
{
"name": "R",
"bytes": "10731"
}
],
"symlink_target": ""
} |
class EzConstants(object):
    """Network port configuration shared by the server and the client."""

    def __init__(self, serverport, clientport):
        """Store the port pair.

        :param serverport: port the server side uses
        :param clientport: port the client side uses
        """
        self.serverport = serverport
        self.clientport = clientport

    def __repr__(self):
        # Aid debugging/logging of the active configuration.
        return "{}(serverport={!r}, clientport={!r})".format(
            type(self).__name__, self.serverport, self.clientport)


# The configuration currently in effect for the application.
currentConstants = EzConstants(5611, 5611)
| {
"content_hash": "7c06abcae418e0fe63f90eb9090a9066",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 47,
"avg_line_length": 27.714285714285715,
"alnum_prop": 0.7010309278350515,
"repo_name": "WheelBarrow2/EzFileSender",
"id": "b2cb2952545539bbb22722737eefb8a4f1823614",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41431"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
from hamcrest import equal_to, assert_that, raises, none, instance_of
from storops.exception import VNXBackendError, VNXInvalidMoverID, \
VNXFsExistedError
from storops.vnx.resource.fs import VNXFileSystem, VNXFileSystemList
from storops_test.vnx.nas_mock import patch_post, t_nas
__author__ = 'Jay Xu'
class FileSystemTest(unittest.TestCase):
    """Tests for VNXFileSystem against mocked NAS XML API responses.

    The @patch_post decorator replaces the XML API POST transport with
    canned responses, so no real array is contacted.
    """
    @staticmethod
    def verify_root_fs_1(fs):
        # Expected attributes for the internal root filesystem fixture.
        assert_that(fs.fs_id, equal_to(1))
        assert_that(fs.internal_use, equal_to(True))
        assert_that(fs.name, equal_to('root_fs_1'))
        assert_that(fs.volume, equal_to(10))
        assert_that(fs.policies, none())
        assert_that(fs.pools, none())
        assert_that(fs.storages, equal_to(1))
        assert_that(fs.type, equal_to('uxfs'))
        assert_that(fs.size, equal_to(16))
    @staticmethod
    def verify_fs_src0(fs):
        # Expected attributes for the user-created filesystem fixture.
        assert_that(fs.fs_id, equal_to(37))
        assert_that(fs.internal_use, equal_to(False))
        assert_that(fs.name, equal_to('fs_src0'))
        assert_that(fs.policies, equal_to(
            'Thin=No,Compressed=No,Mirrored=No,'
            'Tiering policy=N/A/Optimize Pool'))
        assert_that(fs.pools, equal_to([59]))
        assert_that(fs.size, equal_to(1024))
        assert_that(fs.storages, equal_to(1))
        assert_that(fs.type, equal_to('uxfs'))
        assert_that(fs.volume, equal_to(150))
    @patch_post
    def test_clz_get_fs_success(self):
        fs = VNXFileSystem.get(name='fs_src0', cli=t_nas())
        self.verify_fs_src0(fs)
    @patch_post(output='abc.xml')
    def test_clz_get_fs_empty(self):
        # A missing mock response file surfaces as an IOError.
        def f():
            fs = VNXFileSystem.get(name='fs_src0', cli=t_nas())
            assert_that(fs.existed, equal_to(False))
        assert_that(f, raises(IOError))
    @patch_post
    def test_clz_get_all(self):
        fs_list = VNXFileSystem.get(cli=t_nas())
        assert_that(len(fs_list), equal_to(25))
    @patch_post
    def test_get(self):
        fs = VNXFileSystem('fs_src0', cli=t_nas())
        self.verify_fs_src0(fs)
    @patch_post
    def test_get_by_id(self):
        fs = VNXFileSystem(fs_id=27, cli=t_nas())
        assert_that(fs.existed, equal_to(True))
        assert_that(fs.volume, equal_to(125))
    @patch_post
    def test_get_not_found(self):
        fs = VNXFileSystem('abc', cli=t_nas())
        assert_that(fs._get_name(), equal_to('abc'))
        assert_that(fs.existed, equal_to(False))
    @patch_post
    def test_get_all(self):
        fs_list = VNXFileSystemList(t_nas())
        assert_that(len(fs_list), equal_to(25))
        root_fs_1 = [fs for fs in fs_list if fs.name == 'root_fs_1'][0]
        self.verify_root_fs_1(root_fs_1)
        fs_src0 = [fs for fs in fs_list if fs.name == 'fs_src0'][0]
        self.verify_fs_src0(fs_src0)
    @patch_post
    def test_delete_fs_not_exists(self):
        def f():
            fs = VNXFileSystem(fs_id=99, cli=t_nas())
            fs.delete()
        assert_that(f, raises(VNXBackendError, 'not found'))
    @patch_post
    def test_delete_fs_success(self):
        fs = VNXFileSystem(fs_id=98, cli=t_nas())
        resp = fs.delete()
        assert_that(resp.is_ok(), equal_to(True))
    @patch_post
    def test_create_filesystem_invalid_vdm_id(self):
        def f():
            VNXFileSystem.create(t_nas(), 'test18', size_kb=1, pool=0,
                                 mover=1, is_vdm=True)
        assert_that(f, raises(VNXBackendError,
                              'VDM with id=1 not found.'))
    @patch_post
    def test_create_filesystem_invalid_pool(self):
        def f():
            VNXFileSystem.create(t_nas(), 'test17', size_kb=1, pool=0, mover=1)
        assert_that(f, raises(VNXBackendError,
                              'Storage pool was not specified or invalid'))
    @patch_post
    def test_create_filesystem_invalid_size(self):
        def f():
            VNXFileSystem.create(t_nas(), 'test16', size_kb=1, pool=59,
                                 mover=1)
        assert_that(f, raises(VNXBackendError,
                              'specified size cannot be created'))
    @patch_post
    def test_create_filesystem_not_enough_space(self):
        def f():
            VNXFileSystem.create(t_nas(), 'test15', size_kb=1024 ** 2 * 5,
                                 pool=59, mover=1)
        assert_that(f, raises(VNXBackendError,
                              'is not available from the pool'))
    @patch_post
    def test_create_filesystem_invalid_mover_id(self):
        def f():
            VNXFileSystem.create(t_nas(), 'test13', size_kb=1024 * 5,
                                 pool=61, mover=6)
        assert_that(f, raises(VNXInvalidMoverID,
                              'Mover with id=6 not found.'))
    @patch_post
    def test_create_filesystem(self):
        ret = VNXFileSystem.create(t_nas(), 'test14', size_kb=1024 * 5,
                                   pool=61, mover=1)
        assert_that(ret, instance_of(VNXFileSystem))
    @patch_post
    def test_create_fs_existed(self):
        def f():
            VNXFileSystem.create(t_nas(), 'EG_TEST_POOL',
                                 size_kb=1024 * 2 * 5,
                                 pool=32, mover=1)
        assert_that(f, raises(VNXFsExistedError, 'already exists'))
    @patch_post
    def test_extend_fs(self):
        fs = VNXFileSystem(cli=t_nas(), fs_id=243)
        resp = fs.extend(1024 * 4)
        assert_that(resp.is_ok(), equal_to(True))
    @patch_post
    def test_extend_fs_too_small(self):
        # Extending below the current size is rejected by the backend.
        def f():
            fs = VNXFileSystem(cli=t_nas(), fs_id=243)
            fs.extend(1024 * 2)
        assert_that(f, raises(VNXBackendError, 'not valid'))
    @patch_post
    def test_create_fs_snap(self):
        fs = VNXFileSystem(cli=t_nas(), fs_id=222)
        snap = fs.create_snap('test', pool=61)
        assert_that(snap.name, equal_to('test'))
        assert_that(snap.fs_id, equal_to(222))
        assert_that(snap.existed, equal_to(True))
| {
"content_hash": "467bdba037d1ef7dc2c3c70f326739f4",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 33.815642458100555,
"alnum_prop": 0.5684784404427556,
"repo_name": "emc-openstack/storops",
"id": "8c8af55c2222443a9971271c41134c5a9bea2dcb",
"size": "6703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storops_test/vnx/resource/test_fs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1807840"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Convert Page.content into a CKEditor rich-text field."""
    dependencies = [
        ('pages', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='page',
            name='content',
            field=ckeditor.fields.RichTextField(verbose_name='Content'),
        ),
    ]
| {
"content_hash": "0898d8bdffdd80c9387d065e5a3368f1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 21.105263157894736,
"alnum_prop": 0.6084788029925187,
"repo_name": "MeirKriheli/debian.org.il",
"id": "65e808fe29401e1c447a42919d4d99ae8c2364c3",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/pages/migrations/0002_auto_20171011_0017.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3500"
},
{
"name": "HTML",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "28325"
}
],
"symlink_target": ""
} |
"""Module contains Methods used for figuring out which/how many factors to retain
"""
import numpy as np
from fa_kit.broken_stick import BrokenStick
def retain_top_n(vals, num_keep):
    """Keep the ``num_keep`` components with the largest magnitude.

    Returns their indices, largest absolute value first.
    """
    if num_keep < 1:
        raise ValueError(
            "Must select num_keep >= 1 when using 'top_n' retention "
            "criterion. Currently, num_keep = {}".format(num_keep))
    # Sorting on negated magnitudes gives a descending-magnitude order.
    order = np.argsort(-np.abs(vals))
    return order[:num_keep]
def retain_top_pct(vals, pct_keep):
    """Keep components, largest magnitude first, until their cumulative
    absolute value reaches the proportion ``pct_keep``.
    """
    if pct_keep > 1 or pct_keep <= 0:
        raise ValueError(
            "Must set pct_keep between 0 and 1 be when using "
            "'retain_top_pct' retention criterion. "
            "Currently, pct_keep = {}".format(pct_keep))
    order = np.argsort(-np.abs(vals))
    retained = []
    covered = 0.0
    for idx in order:
        # Stop once the already-retained mass covers the requested share.
        if covered >= pct_keep:
            break
        retained.append(idx)
        covered += np.abs(vals[idx])
    return retained
def retain_kaiser(vals, data_dim):
    """
    Apply Kaiser's retention criterion: keep every component whose
    absolute value exceeds 1/data_dim of the total.

    The dimensionality is passed in explicitly because zero-valued
    eigenvalues may already have been cropped out of `vals`, so it
    cannot be recovered from len(vals) alone.
    """
    if data_dim is None or data_dim < len(vals):
        raise ValueError(
            "data_dim is missing or improperly specified "
            "for Kaiser criterion. Current value {}".format(data_dim)
        )
    threshold = 1.0 / data_dim
    return [idx for idx, val in enumerate(vals) if np.abs(val) > threshold]
def retain_broken_stick(vals, broken_stick):
    """
    Decide which components to keep by comparing the values, sorted in
    descending order, against a broken-stick reference distribution:
    indices where the observed value exceeds the expectation are retained.
    """
    ordered = np.asarray(sorted(vals, reverse=True))
    return broken_stick.find_where_target_exceeds(ordered)
| {
"content_hash": "4813c7f5c5af177eae1d99a972258948",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 81,
"avg_line_length": 26.53488372093023,
"alnum_prop": 0.6205083260297984,
"repo_name": "bmcmenamin/fa_kit",
"id": "6e6b7756b05a1e30c86e196d5881963d5c42d1c6",
"size": "2282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fa_kit/retention.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46036"
}
],
"symlink_target": ""
} |
"""project related functions"""
import os
import shutil
import subprocess
import yaml
from mod import log, util, config, dep, template, settings, android, emsdk
from mod.tools import git, cmake, make, ninja, xcodebuild, xcrun, ccmake, cmake_gui, vscode, clion
#-------------------------------------------------------------------------------
def init(fips_dir, proj_name) :
    """initialize an existing project directory as a fips directory by
    copying essential files and creating or updating .gitignore

    :param fips_dir:    absolute path to fips
    :param proj_name:   project directory name (dir must exist)
    :returns:           True if the project was successfully initialized
    """
    ws_dir = util.get_workspace_dir(fips_dir)
    proj_dir = util.get_project_dir(fips_dir, proj_name)
    if os.path.isdir(proj_dir) :
        templ_values = {
            'project': proj_name
        }
        for f in ['CMakeLists.txt', 'fips', 'fips.cmd', 'fips.yml'] :
            template.copy_template_file(fips_dir, proj_dir, f, templ_values)
        # make the copied 'fips' helper script executable
        os.chmod(proj_dir + '/fips', 0o744)
        gitignore_entries = ['.fips-*', '*.pyc', '.vscode/', '.idea/']
        template.write_git_ignore(proj_dir, gitignore_entries)
        # FIX: previously fell through and returned None on success even
        # though the docstring promises True
        return True
    else :
        log.error("project dir '{}' does not exist".format(proj_dir))
        return False
#-------------------------------------------------------------------------------
def clone(fips_dir, url) :
    """clone an existing fips project with git, do NOT fetch dependencies

    :param fips_dir:    absolute path to fips
    :param url:         git url to clone from (may contain branch name separated by '#')
    :return:            True if project was successfully cloned
    """
    ws_dir = util.get_workspace_dir(fips_dir)
    proj_name = util.get_project_name_from_url(url)
    proj_dir = util.get_project_dir(fips_dir, proj_name)
    # refuse to overwrite an existing project directory
    if os.path.isdir(proj_dir) :
        log.error("project dir '{}' already exists".format(proj_dir))
        return False
    git_url = util.get_giturl_from_url(url)
    git_branch = util.get_gitbranch_from_url(url)
    if not git.clone(git_url, git_branch, git.clone_depth, proj_name, ws_dir) :
        log.error("failed to 'git clone {}' into '{}'".format(url, proj_dir))
        return False
    # clone succeeded: pull in the imported projects right away
    dep.fetch_imports(fips_dir, proj_dir)
    return True
#-------------------------------------------------------------------------------
def gen_project(fips_dir, proj_dir, cfg, force) :
    """private: generate build files for one config

    :param fips_dir:    absolute path to fips
    :param proj_dir:    absolute path to project
    :param cfg:         config object for which to generate build files
    :param force:       re-run cmake even if a CMake cache already exists
    :returns:           result of cmake.run_gen() (True on success), or
                        True if generation was skipped entirely
    """
    proj_name = util.get_project_name_from_dir(proj_dir)
    build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])
    # assemble the cmake -D defines from project settings and policies
    defines = {}
    defines['FIPS_USE_CCACHE'] = 'ON' if settings.get(proj_dir, 'ccache') else 'OFF'
    defines['FIPS_AUTO_IMPORT'] = 'OFF' if dep.get_policy(proj_dir, 'no_auto_import') else 'ON'
    # only request a compile_commands.json for generators that support it
    if cfg['generator'] in ['Ninja', 'Unix Makefiles']:
        defines['CMAKE_EXPORT_COMPILE_COMMANDS'] = 'ON'
    # platform-specific defines (SDK sysroots, signing team, emscripten root)
    if cfg['platform'] == 'ios':
        defines['CMAKE_OSX_SYSROOT'] = xcrun.get_ios_sdk_sysroot()
        ios_team_id = settings.get(proj_dir, 'iosteam')
        if ios_team_id:
            defines['FIPS_IOS_TEAMID'] = ios_team_id
    if cfg['platform'] == 'osx':
        defines['CMAKE_OSX_SYSROOT'] = xcrun.get_macos_sdk_sysroot()
    if cfg['platform'] == 'emscripten':
        defines['EMSCRIPTEN_ROOT'] = emsdk.get_emscripten_root(fips_dir)
    # (re)generate if forced, or if the build dir has no CMake cache yet
    do_it = force
    if not os.path.isdir(build_dir) :
        os.makedirs(build_dir)
    if not os.path.isfile(build_dir + '/CMakeCache.txt'):
        do_it = True
    if do_it :
        # if Ninja build tool and on Windows, need to copy
        # the precompiled ninja.exe to the build dir
        log.colored(log.YELLOW, "=== generating: {}".format(cfg['name']))
        log.info("config file: {}".format(cfg['path']))
        toolchain_path = config.get_toolchain(fips_dir, proj_dir, cfg)
        if toolchain_path :
            log.info("Using Toolchain File: {}".format(toolchain_path))
        cmake_result = cmake.run_gen(cfg, fips_dir, proj_dir, build_dir, toolchain_path, defines)
        # write IDE workspace settings for build tools that use them
        if vscode.match(cfg['build_tool']):
            vscode.write_workspace_settings(fips_dir, proj_dir, cfg)
        if clion.match(cfg['build_tool']):
            clion.write_workspace_settings(fips_dir, proj_dir, cfg)
        return cmake_result
    else :
        return True
#-------------------------------------------------------------------------------
def gen(fips_dir, proj_dir, cfg_name) :
    """generate build files with cmake

    :param fips_dir:    absolute path to fips
    :param proj_dir:    absolute path to project
    :param cfg_name:    config name or pattern (e.g. osx-make-debug)
    :returns:           True if all matching configs generated successfully
    """
    # fetch imported projects and write the generated import files
    dep.fetch_imports(fips_dir, proj_dir)
    proj_name = util.get_project_name_from_dir(proj_dir)
    util.ensure_valid_project_dir(proj_dir)
    dep.gather_and_write_imports(fips_dir, proj_dir, cfg_name)
    # resolve the config name/pattern into config objects
    configs = config.load(fips_dir, proj_dir, cfg_name)
    num_generated = 0
    if not configs :
        log.error("No configs found for '{}'".format(cfg_name))
    else :
        for cfg in configs :
            valid, _ = config.check_config_valid(fips_dir, proj_dir, cfg, print_errors = True)
            if not valid :
                log.error("'{}' is not a valid config".format(cfg['name']), False)
            elif gen_project(fips_dir, proj_dir, cfg, True) :
                num_generated += 1
            else :
                log.error("failed to generate build files for config '{}'".format(cfg['name']), False)
    if num_generated != len(configs) :
        log.error('{} out of {} configs failed!'.format(len(configs) - num_generated, len(configs)))
        return False
    else :
        log.colored(log.GREEN, '{} configs generated'.format(num_generated))
        return True
#-------------------------------------------------------------------------------
def configure(fips_dir, proj_dir, cfg_name) :
    """run ccmake or cmake-gui on the provided project and config

    :param fips_dir:    absolute fips path
    :param proj_dir:    absolute project dir
    :cfg_name:          build config name
    """
    dep.fetch_imports(fips_dir, proj_dir)
    proj_name = util.get_project_name_from_dir(proj_dir)
    util.ensure_valid_project_dir(proj_dir)
    dep.gather_and_write_imports(fips_dir, proj_dir, cfg_name)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if not configs :
        log.error("No configs found for '{}'".format(cfg_name))
        return
    # if the pattern matches several configs, only the first one is used
    cfg = configs[0]
    log.colored(log.YELLOW, '=== configuring: {}'.format(cfg['name']))
    # make sure build files exist before launching the configuration UI
    if not gen_project(fips_dir, proj_dir, cfg, True) :
        log.error("Failed to generate '{}' of project '{}'".format(cfg['name'], proj_name))
    # prefer ccmake, fall back to cmake-gui
    build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])
    if ccmake.check_exists(fips_dir) :
        ccmake.run(build_dir)
    elif cmake_gui.check_exists(fips_dir) :
        cmake_gui.run(build_dir)
    else :
        log.error("Neither 'ccmake' nor 'cmake-gui' found (run 'fips diag')")
#-------------------------------------------------------------------------------
def make_clean(fips_dir, proj_dir, cfg_name) :
    """perform a 'make clean' on the project

    :param fips_dir:    absolute path of fips
    :param proj_dir:    absolute path of project dir
    :param cfg_name:    config name or pattern
    :returns:           True if all matching configs were cleaned successfully
    """
    proj_name = util.get_project_name_from_dir(proj_dir)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    num_valid_configs = 0
    if configs :
        for cfg in configs :
            config_valid, _ = config.check_config_valid(fips_dir, proj_dir, cfg, print_errors=True)
            if config_valid :
                log.colored(log.YELLOW, "=== cleaning: {}".format(cfg['name']))
                build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])
                # dispatch to the clean command of the config's build tool;
                # cmake acts as the generic fallback driver
                result = False
                if make.match(cfg['build_tool']):
                    result = make.run_clean(fips_dir, build_dir)
                elif ninja.match(cfg['build_tool']):
                    result = ninja.run_clean(fips_dir, build_dir)
                elif xcodebuild.match(cfg['build_tool']):
                    result = xcodebuild.run_clean(fips_dir, build_dir)
                else :
                    result = cmake.run_clean(fips_dir, build_dir)
                if result :
                    num_valid_configs += 1
                else :
                    log.error("Failed to clean config '{}' of project '{}'".format(cfg['name'], proj_name))
            else :
                log.error("Config '{}' not valid in this environment".format(cfg['name']))
    else :
        log.error("No valid configs found for '{}'".format(cfg_name))
    if num_valid_configs != len(configs) :
        log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs)))
        return False
    else :
        log.colored(log.GREEN, '{} configs cleaned'.format(num_valid_configs))
        return True
#-------------------------------------------------------------------------------
def build(fips_dir, proj_dir, cfg_name, target=None, build_tool_args=None) :
    """perform a build of config(s) in project

    :param fips_dir:        absolute path of fips
    :param proj_dir:        absolute path of project dir
    :param cfg_name:        config name or pattern
    :param target:          optional target name (build all if None)
    :param build_tool_args: optional string array of cmdline args forwarded to the build tool
    :returns:               True if build was successful
    """
    # prepare: resolve imports before anything is built
    dep.fetch_imports(fips_dir, proj_dir)
    proj_name = util.get_project_name_from_dir(proj_dir)
    util.ensure_valid_project_dir(proj_dir)
    dep.gather_and_write_imports(fips_dir, proj_dir, cfg_name)
    # load the config(s)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    num_valid_configs = 0
    if configs :
        for cfg in configs :
            # check if config is valid
            config_valid, _ = config.check_config_valid(fips_dir, proj_dir, cfg, print_errors=True)
            if config_valid :
                log.colored(log.YELLOW, "=== building: {}".format(cfg['name']))
                # generate build files if they don't exist yet (force=False)
                if not gen_project(fips_dir, proj_dir, cfg, False) :
                    log.error("Failed to generate '{}' of project '{}'".format(cfg['name'], proj_name))
                # select and run build tool
                build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])
                num_jobs = settings.get(proj_dir, 'jobs')
                result = False
                if make.match(cfg['build_tool']):
                    result = make.run_build(fips_dir, target, build_dir, num_jobs, build_tool_args)
                elif ninja.match(cfg['build_tool']):
                    result = ninja.run_build(fips_dir, target, build_dir, num_jobs, build_tool_args)
                elif xcodebuild.match(cfg['build_tool']):
                    result = xcodebuild.run_build(fips_dir, target, cfg['build_type'], build_dir, num_jobs, build_tool_args)
                else :
                    # generic fallback: let cmake drive the native build tool
                    result = cmake.run_build(fips_dir, target, cfg['build_type'], build_dir, num_jobs, build_tool_args)
                if result :
                    num_valid_configs += 1
                else :
                    log.error("Failed to build config '{}' of project '{}'".format(cfg['name'], proj_name))
            else :
                log.error("Config '{}' not valid in this environment".format(cfg['name']))
    else :
        log.error("No valid configs found for '{}'".format(cfg_name))
    if num_valid_configs != len(configs) :
        log.error('{} out of {} configs failed!'.format(len(configs) - num_valid_configs, len(configs)))
        return False
    else :
        log.colored(log.GREEN, '{} configs built'.format(num_valid_configs))
        return True
#-------------------------------------------------------------------------------
def run(fips_dir, proj_dir, cfg_name, target_name, target_args, target_cwd) :
    """run a build target executable

    :param fips_dir:    absolute path of fips
    :param proj_dir:    absolute path of project dir
    :param cfg_name:    config name or pattern
    :param target_name: the target name
    :param target_args: command line arguments for build target
    :param target_cwd:  working directory or None (defaults to the deploy dir)
    :returns:           exit code of the executed target (10 if nothing ran)
    """
    retcode = 10
    proj_name = util.get_project_name_from_dir(proj_dir)
    util.ensure_valid_project_dir(proj_dir)
    # load the config(s)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if configs :
        for cfg in configs :
            log.colored(log.YELLOW, "=== run '{}' (config: {}, project: {}):".format(target_name, cfg['name'], proj_name))
            # FIX: reset cmd_line for every config; previously the emscripten
            # code path could reach the 'if cmd_line' check below with the
            # variable never having been assigned (UnboundLocalError)
            cmd_line = None
            # find deploy dir where executables live
            deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg['name'])
            if not target_cwd :
                target_cwd = deploy_dir
            if cfg['platform'] == 'emscripten':
                # special case: emscripten app, serve via http-server and
                # open the page in the host platform's default browser
                html_name = target_name + '.html'
                if util.get_host_platform() == 'osx' :
                    try :
                        subprocess.call(
                            'open http://localhost:8080/{} ; http-server -c-1 -g'.format(html_name),
                            cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                elif util.get_host_platform() == 'win' :
                    try :
                        cmd = 'cmd /c start http://localhost:8080/{} && http-server -c-1 -g'.format(html_name)
                        subprocess.call(cmd, cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                elif util.get_host_platform() == 'linux' :
                    try :
                        subprocess.call(
                            'xdg-open http://localhost:8080/{}; http-server -c-1 -g'.format(html_name),
                            cwd = target_cwd, shell=True)
                    except KeyboardInterrupt :
                        return 0
                else :
                    log.error("don't know how to start HTML app on this platform")
            elif cfg['platform'] == 'android' :
                try :
                    adb_path = android.get_adb_path(fips_dir)
                    pkg_name = android.target_to_package_name(target_name)
                    # Android: first re-install the apk...
                    cmd = '{} install -r {}.apk'.format(adb_path, target_name)
                    subprocess.call(cmd, shell=True, cwd=deploy_dir)
                    # ...then start the apk
                    cmd = '{} shell am start -n {}/android.app.NativeActivity'.format(adb_path, pkg_name)
                    subprocess.call(cmd, shell=True)
                    # ...then run adb logcat
                    cmd = '{} logcat'.format(adb_path)
                    subprocess.call(cmd, shell=True)
                    return 0
                except KeyboardInterrupt :
                    return 0
            elif os.path.isdir('{}/{}.app'.format(deploy_dir, target_name)) :
                # special case: Mac app bundle, run the inner executable
                cmd_line = '{}/{}.app/Contents/MacOS/{}'.format(deploy_dir, target_name, target_name)
            else :
                # regular case: plain executable in the deploy dir
                cmd_line = '{}/{}'.format(deploy_dir, target_name)
            if cmd_line :
                if target_args :
                    cmd_line += ' ' + ' '.join(target_args)
                try:
                    retcode = subprocess.call(args=cmd_line, cwd=target_cwd, shell=True)
                except OSError as e:
                    log.error("Failed to execute '{}' with '{}'".format(target_name, e.strerror))
    else :
        log.error("No valid configs found for '{}'".format(cfg_name))
    return retcode
#-------------------------------------------------------------------------------
def clean(fips_dir, proj_dir, cfg_name) :
    """clean build files

    :param fips_dir:    absolute path of fips
    :param proj_dir:    absolute project path
    :param cfg_name:    config name (or pattern)
    """
    proj_name = util.get_project_name_from_dir(proj_dir)
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if not configs :
        log.error("No valid configs found for '{}'".format(cfg_name))
        return
    num_cleaned_configs = 0
    for cfg in configs :
        # a config is "cleanable" if its build or deploy dir exists
        build_dir = util.get_build_dir(fips_dir, proj_name, cfg['name'])
        deploy_dir = util.get_deploy_dir(fips_dir, proj_name, cfg['name'])
        doomed_dirs = [d for d in (build_dir, deploy_dir) if os.path.isdir(d)]
        if doomed_dirs :
            log.colored(log.YELLOW, "=== clean: {}".format(cfg['name']))
            num_cleaned_configs += 1
            for doomed in doomed_dirs :
                shutil.rmtree(doomed)
                log.info("  deleted '{}'".format(doomed))
    if num_cleaned_configs == 0 :
        log.colored(log.YELLOW, "=== clean: nothing to clean for {}".format(cfg_name))
#-------------------------------------------------------------------------------
def get_target_list(fips_dir, proj_dir, cfg_name) :
    """get the project's build targets for a config name; only works
    if a cmake run was performed before

    :param fips_dir:    absolute path to fips
    :param proj_dir:    absolute project path
    :param cfg_name:    the config name
    :returns:           (success, targets)
    """
    configs = config.load(fips_dir, proj_dir, cfg_name)
    if not configs :
        log.error("No valid configs found for '{}'".format(cfg_name))
        return
    # only the first matching config is queried
    return util.get_cfg_target_list(fips_dir, proj_dir, configs[0])
| {
"content_hash": "12eaba2115161b05a14c4a343d0466cc",
"timestamp": "",
"source": "github",
"line_count": 417,
"max_line_length": 124,
"avg_line_length": 44.31414868105516,
"alnum_prop": 0.545321716543103,
"repo_name": "code-disaster/fips",
"id": "91a042b3013c327cdbd52fabed92831e6a702823",
"size": "18479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mod/project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "CMake",
"bytes": "120172"
},
{
"name": "Java",
"bytes": "162"
},
{
"name": "Python",
"bytes": "632029"
},
{
"name": "Vim script",
"bytes": "211"
}
],
"symlink_target": ""
} |
"""
Python API for Product Hunt.
Unofficial Python API for Product Hunt.
@author Arindam Pradhan
@email arindampradhan10@gmail.com
"""
import re
import time
import operator
import urlparse
import requests
from bs4 import BeautifulSoup
from .utils import get_soup, comment_soup,striphtml,strp
from .constants import BASE_URL, INTERVAL_BETWEEN_REQUESTS
class PH(object):
    """Scrapes the Product Hunt front page and builds Product objects."""
    def __init__(self):
        # unused scratch attribute (never read anywhere in this class)
        self.more = ''
    def _next_page(self,current_page):
        """
        Get the relative url of the next page by adding a ?page= query.
        """
        # not in use
        return urlparse.urljoin(BASE_URL ,("?page=" + str(current_page+1) ))
    @classmethod
    def _build_product(self,soup):
        """
        Builds and returns a list of Product objects scraped from the
        Product Hunt posts page.

        NOTE(review): despite taking a `soup` argument, the method ignores
        it and re-downloads BASE_URL below, rebinding `soup` -- confirm
        whether the passed-in soup should be used instead.
        """
        all_products = []
        page = requests.get(BASE_URL).text
        soup = BeautifulSoup(page)
        posts_html = soup.find('div',id="posts-wrapper")
        post = posts_html.find_all('li',class_=re.compile(r'(post m-hidden|post)'))
        for product in post:
            # scrape the counters and the numeric post id from each <li>
            num_comments = int(product.find('span', class_="comment-count").string)
            product_id = int(product.find('div',class_="upvote")['data-vote-id'])
            upvote = int(product.find('span',class_="vote-count").string)
            product.find('span', class_="comment-count")  # NOTE(review): result unused
            published_time = product.find_parent("div",class_=re.compile(r'(day today|day)')).time['datetime']
            prod = product.find('div',class_="url")
            url = prod.a['href']
            link = urlparse.urljoin(BASE_URL,url)
            domain = link #requests.get(link).url ##will make it slow
            title = prod.a.string
            # NOTE(review): 'description' and 'user' are scraped but never
            # passed to the Product constructor below
            description = prod.find('span', class_="post-tagline description").string
            user = product.find('div', class_="user-hover-card").h3.string
            # re-reads published_time, overwriting the value fetched above
            published_time = product.find_parent('div', class_=re.compile(r'(day today|day)')).time['datetime']
            submitter_card = product.find('div', class_="user-hover-card")
            submitter = submitter_card.h3.string
            submitter_id = submitter_card.find('a', {'data-component':"FollowMarker"})['data-follow-id']
            obj_product = Product(upvote,product_id,title,link,domain,submitter,submitter_id,published_time,num_comments)
            all_products.append(obj_product)
        return all_products
    @classmethod
    def get_products(self,limit=30,page_type=1):
        """
        Returns a list of product(s) from the passed page of PH.

        '' = newest stories (homepage) (default)
        'num' = page {num} products i.e 1,2,3
        'top' = most voted products of the page
        'newest' = get the newest stories

        'limit' is the number of stories required from the given page.
        Defaults to 30. Cannot be more than 150 [change the code if you want more !].
        """
        max_limit = 150 # change it if you want more
        # any non-int page selector falls back to page 1
        # NOTE(review): a string page_type like 'top' is reset to 1 here
        # BEFORE 'pager' is captured below, so the 'top' sort branch looks
        # unreachable -- confirm intended behavior
        if type(page_type) == int:
            soup = get_soup(page_type)
        else:
            page_type = 1
            soup = get_soup(page_type)
        if page_type == "newest":
            page_type = 1
        pager = page_type
        if limit == None or limit < 1 :
            limit = 30
        products_found = 0
        products = []
        while products_found < limit:
            products = products + self._build_product(soup)
            # NOTE(review): this adds the cumulative list length each pass,
            # not just the newly scraped count, so products_found overshoots
            products_found = products_found + len(products)
            if products_found >= limit or products_found >=max_limit:
                if pager == "top":
                    products = sorted(products, key=operator.attrgetter('upvote'),reverse=True) # orders in decending order
                return products[:limit]
            page_type = page_type + 1
            soup = get_soup(page_type)
class Product(object):
    """Represents a single product (post) on Product Hunt."""
    def __init__(self, upvote,product_id,title,link,domain,submitter,\
        submitter_id,published_time,num_comments):
        # vote count and the numeric post id scraped from the page
        self.upvote = upvote
        self.product_id = product_id
        self.title = title
        # absolute Product Hunt link; 'domain' is the resolved target url
        self.link = link
        self.domain = domain
        # name and follow-id of the user who submitted the post
        self.submitter = submitter
        self.submitter_id = submitter_id
        self.published_time = published_time
        self.num_comments = num_comments
    def __repr__(self):
        """
        A string representation of the class object
        """
        return '<Product: ID={0}>'.format(self.product_id)
    def _build_comments(self,soup):
        """
        For the Product, builds and returns a list of Comment objects
        scraped from the post's comment section.
        """
        COMMENTS = []
        c_soup = soup.find('section',class_="modal-post--comments")
        post_show = c_soup.find('main', {"data-comment":"list"})
        num_comment = c_soup.h2.string.split(" ")[0]  # NOTE(review): unused
        comments_htmls = post_show.find_all("div", {"data-comment":"comment"},recursive=False)
        for html_comment in comments_htmls:
            # each top-level comment div holds the thread root plus replies
            sub_comments = html_comment.find_all("span",{'class':"modal-post--comment--avatar" ,'data-popover':"hover"})
            parent_id = ""
            first_comments = sub_comments[0]
            for comment_sub in sub_comments:
                body_html = comment_sub.find_next_sibling()
                body = body_html('p')
                body = striphtml(str(body))
                time = body_html.find("span",{"data-component":"TimeAgo"})['title']
                user_id= comment_sub.find("a",{"data-component":"FollowMarker"})["data-follow-id"]
                user_name = body_html.h1.string
                user_name = strp(user_name)
                upvote = body_html.find("span",{'data-vote-count':""}).string
                upvote = strp(upvote)
                comment_id = body_html['id'].split('-')[1]
                # if the scraped text is still the "Upvote" button label,
                # there is no count -- store None instead
                if "upvote" in upvote.lower():
                    upvote = None
                # the first avatar in the div is the thread root; its id
                # becomes the parent_id of all following replies
                if comment_sub == first_comments:
                    parent_id = comment_id
                a = Comment(user_id,user_name,comment_id,parent_id,body_html,body,upvote,time)
                COMMENTS.append(a)
        return COMMENTS
    @classmethod
    def get(self,product_id):
        """
        Initializes and returns a Product instance for the given product_id
        by scraping the post's detail page.
        """
        if not product_id:
            raise Exception('Need an formid for a Story')
        soup = comment_soup(str(product_id))
        product = soup.find('div',class_="modal-post")
        title = product.find("h1").a.string
        link = product.find("h1").a['href']
        link = urlparse.urljoin(BASE_URL,link)
        upvote = int(product.find('span', class_="vote-count").string)
        posted_by = product.find('div', class_="modal-post--submitted")
        published_time = posted_by.find('span',{"data-component":"TimeAgo"})['title']
        num_comments = int(product.find("section", class_="modal-post--comments").h2.string.split(" ")[0])
        # follow redirects to resolve the product's real destination url
        domain = requests.get(link).url
        submitter = posted_by.find('h3').string
        submitter_id = posted_by.find('a',{"data-component":"FollowMarker"})['data-follow-id']
        return Product(upvote,product_id,title,link,domain,submitter,submitter_id,published_time,num_comments)
    def get_comments(self):
        """
        Return a list of Comment(s) for the product
        """
        soup = comment_soup(self.product_id)
        return self._build_comments(soup)
class User(object):
    """Represents a user (or organization) profile on Product Hunt."""
    def __init__(self, user_id,user_name,about,upvote,submitted,made,followers,followings,twitter):
        self.user_id = user_id
        self.user_name = user_name
        self.about = about
        # profile counters as scraped from the page header; 'submitted'
        # may be None for profiles without a submitted-products tab
        self.upvote = upvote
        self.submitted = submitted
        self.made = made
        self.followers = followers
        self.followings = followings
        self.twitter = twitter
    def __repr__(self):
        return """
        user_id : {0}
        user_name : {1}
        about : {2}
        twitter : {3}""".format(str(self.user_id),self.user_name,self.about,self.twitter)
    def _collect_follow_ids(self,follow_type,limit):
        """
        Shared scraper for the followers/followings listing pages
        (get_follower and get_following were copy-paste duplicates).
        Pages hold up to 50 entries each.
        """
        url = "{0}{1}/{2}".format(BASE_URL,self.user_id,follow_type)
        page_count_limit = int((limit+1)/50)
        page_count = 1
        followers = []
        count = 0
        while page_count <= page_count_limit:
            current_page = requests.get(url+"?page="+str(page_count)).text
            soup = BeautifulSoup(current_page)
            follow_group = soup.find_all("li", class_="people--person")
            for follow in follow_group:
                follow_id = follow.find('div', class_="user-hover-card").find('a', class_="button")['data-follow-id']
                followers.append(follow_id)
                count = count + 1
                if count >= limit:
                    break
            page_count = page_count+1
            # stop once a listing page comes back empty
            if len(soup.find('ul', class_="people").find_all('li',class_='people--person')) == 0:
                break
        return followers[:limit]
    def get_follower(self,limit=50,page_count_limit=""):
        """
        Gives the ids of followers.
        Default limit = 50.
        ('page_count_limit' was always overwritten internally and is kept
        only for backward compatibility; it is ignored.)
        """
        return self._collect_follow_ids("followers",limit)
    def get_following(self,limit=50,page_count_limit=""):
        """
        Gives the ids of the people the user is following.
        Default limit = 50.
        ('page_count_limit' was always overwritten internally and is kept
        only for backward compatibility; it is ignored.)
        """
        return self._collect_follow_ids("followings",limit)
    def get_votes(self,limit=50,page_count_limit=""):
        """
        Gives the ids of the posts listed on the user's profile page.
        Default limit = 50; 'page_count_limit' overrides the number of
        listing pages fetched when given.

        NOTE(review): the original docstring described an
        'upvoted/products/posts' selector parameter that was never
        implemented -- only the profile's default listing is scraped.
        """
        if page_count_limit == "":
            page_count_limit = int((limit+1)/50)
        url = BASE_URL + str(self.user_id)
        page_count = 1
        data_ids = []
        while page_count <= page_count_limit:
            current_page = requests.get(url+"?page="+str(page_count)).text
            soup = BeautifulSoup(current_page)
            post_group = soup.find('ul',class_="posts-group")
            post_list = post_group.find_all('li',class_="post")
            for pst in post_list:
                data_id = pst.find('div',class_="upvote")['data-vote-id']
                data_ids.append(data_id)
            page_count = page_count+1
            # FIX: compare small ints with '==', not 'is' (identity of int
            # literals is an implementation detail; SyntaxWarning on 3.8+)
            if len(soup.find_all('li', class_="post")) == 0:
                break
        return data_ids[:limit]
    @classmethod
    def get(self,user_id):
        """
        Initialize and return a User instance scraped from the profile
        page of the given user id.
        """
        if not user_id:
            raise Exception('Need an user_id for a user')
        TAG_RE = re.compile(r'<[^>]+>')
        def remove_tags(text):
            return TAG_RE.sub('', text)
        u_url = "http://www.producthunt.com/" + str(user_id)
        req = requests.get(u_url)
        u_page = req.text
        soup = BeautifulSoup(u_page)
        about = soup.find('h2',class_="page-header--subtitle").string
        user = soup.find("h1",class_="page-header--title")
        user_text = remove_tags(str(user))
        user_name = strp(user_text.split("#")[0])
        nav = soup.find('nav',class_="page-header--navigation")
        upvote_u = nav.find_all('strong')
        # the final url component is the user's twitter handle after redirects
        u_twitter = req.url.split('/')[-1]
        # the number of <strong> counters distinguishes profile layouts
        # (user with submissions / extended layout / maker-only profile);
        # any other layout falls through and returns None
        if len(upvote_u) == 5:
            u_num_upvote = upvote_u[0].string
            u_num_submit = upvote_u[1].string
            u_num_made = upvote_u[2].string
            u_num_followers = upvote_u[3].string
            u_num_following = upvote_u[4].string
            return User(user_id,user_name,about,u_num_upvote,u_num_submit,u_num_made,u_num_followers,u_num_following,u_twitter)
        if len(upvote_u) > 5:
            u_num_upvote = upvote_u[0].string
            u_num_submit = upvote_u[1].string
            u_num_made = upvote_u[2].string
            u_num_followers = upvote_u[-2].string
            u_num_following = upvote_u[-1].string
            return User(user_id,user_name,about,u_num_upvote,u_num_submit,u_num_made,u_num_followers,u_num_following,u_twitter)
        if len(upvote_u) == 4:
            u_num_upvote = upvote_u[0].string
            u_num_made = upvote_u[1].string
            u_num_followers = upvote_u[-2].string
            u_num_following = upvote_u[-1].string
            return User(user_id,user_name,about,u_num_upvote,None,u_num_made,u_num_followers,u_num_following,u_twitter)
class Comment(object):
    """Represents a single comment in a product's discussion thread."""
    def __init__(self, user_id,user_name,comment_id,parent_id,body_html,body,upvote,time):
        # id and display name of the commenting user
        self.user_id = user_id
        self.user_name = user_name
        # this comment's id; parent_id refers to the thread's root comment
        self.comment_id = comment_id
        self.parent_id = parent_id
        # raw html node and the stripped plain-text body
        self.body_html = body_html
        self.body = body
        # scraped upvote count (may be None -- see Product._build_comments)
        self.upvote = upvote
        self.time = time
    def __repr__(self):
return '<Comment: ID={0}>'.format(self.comment_id) | {
"content_hash": "7e101cc6fc51a9bd2d803b82b4073ca3",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 128,
"avg_line_length": 39.90716180371353,
"alnum_prop": 0.5609837155201064,
"repo_name": "arindampradhan/ProductHunt_API",
"id": "5b41ee427ba5b6d315ac9913be6f83544f087a60",
"size": "15068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ph/ph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17470"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.